Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc | 5
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h | 12
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc | 5
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp | 47
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h | 6
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/secure_monitor.h | 5
-rw-r--r--  src/core/hle/kernel/code_set.cpp | 5
-rw-r--r--  src/core/hle/kernel/code_set.h | 5
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 12
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 8
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 65
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 45
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp | 106
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.h | 10
-rw-r--r--  src/core/hle/kernel/initial_process.h | 22
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 32
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h | 28
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_address_space_info.h | 5
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h | 8
-rw-r--r--  src/core/hle/kernel/k_auto_object.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_auto_object.h | 23
-rw-r--r--  src/core/hle/kernel/k_auto_object_container.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_auto_object_container.h | 5
-rw-r--r--  src/core/hle/kernel/k_class_token.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_class_token.h | 8
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_client_port.h | 9
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 11
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 34
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 25
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 26
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 11
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 19
-rw-r--r--  src/core/hle/kernel/k_event.h | 7
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp | 17
-rw-r--r--  src/core/hle/kernel/k_handle_table.h | 45
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 17
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.h | 5
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h | 5
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_light_lock.h | 5
-rw-r--r--  src/core/hle/kernel/k_linked_list.h | 5
-rw-r--r--  src/core/hle/kernel/k_memory_block.h | 5
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.h | 5
-rw-r--r--  src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h | 15
-rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp | 475
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 174
-rw-r--r--  src/core/hle/kernel/k_memory_region.h | 5
-rw-r--r--  src/core/hle/kernel/k_memory_region_type.h | 15
-rw-r--r--  src/core/hle/kernel/k_page_bitmap.h | 5
-rw-r--r--  src/core/hle/kernel/k_page_buffer.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_page_buffer.h | 27
-rw-r--r--  src/core/hle/kernel/k_page_group.h (renamed from src/core/hle/kernel/k_page_linked_list.h) | 19
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp | 131
-rw-r--r--  src/core/hle/kernel/k_page_heap.h | 226
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 1350
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 203
-rw-r--r--  src/core/hle/kernel/k_port.cpp | 14
-rw-r--r--  src/core/hle/kernel/k_port.h | 7
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 8
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 365
-rw-r--r--  src/core/hle/kernel/k_process.h | 97
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 11
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 11
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 26
-rw-r--r--  src/core/hle/kernel/k_resource_limit.h | 12
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 744
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 232
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 10
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h | 8
-rw-r--r--  src/core/hle/kernel/k_scoped_resource_reservation.h | 8
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 8
-rw-r--r--  src/core/hle/kernel/k_server_port.cpp | 11
-rw-r--r--  src/core/hle/kernel/k_server_port.h | 13
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 37
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 17
-rw-r--r--  src/core/hle/kernel/k_session.cpp | 5
-rw-r--r--  src/core/hle/kernel/k_session.h | 5
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 21
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 23
-rw-r--r--  src/core/hle/kernel/k_shared_memory_info.h | 5
-rw-r--r--  src/core/hle/kernel/k_slab_heap.h | 235
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp | 44
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h | 9
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 18
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 13
-rw-r--r--  src/core/hle/kernel/k_system_control.h | 5
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 193
-rw-r--r--  src/core/hle/kernel/k_thread.h | 141
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.cpp | 67
-rw-r--r--  src/core/hle/kernel/k_thread_local_page.h | 110
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp | 14
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 14
-rw-r--r--  src/core/hle/kernel/k_trace.h | 5
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 9
-rw-r--r--  src/core/hle/kernel/k_worker_task.h | 5
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.cpp | 7
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.h | 5
-rw-r--r--  src/core/hle/kernel/k_writable_event.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_writable_event.h | 9
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 442
-rw-r--r--  src/core/hle/kernel/kernel.h | 79
-rw-r--r--  src/core/hle/kernel/memory_types.h | 5
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 37
-rw-r--r--  src/core/hle/kernel/physical_core.h | 27
-rw-r--r--  src/core/hle/kernel/physical_memory.h | 5
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 48
-rw-r--r--  src/core/hle/kernel/process_capability.h | 43
-rw-r--r--  src/core/hle/kernel/service_thread.cpp | 12
-rw-r--r--  src/core/hle/kernel/service_thread.h | 5
-rw-r--r--  src/core/hle/kernel/slab_helpers.h | 7
-rw-r--r--  src/core/hle/kernel/svc.cpp | 540
-rw-r--r--  src/core/hle/kernel/svc.h | 5
-rw-r--r--  src/core/hle/kernel/svc_common.h | 5
-rw-r--r--  src/core/hle/kernel/svc_results.h | 63
-rw-r--r--  src/core/hle/kernel/svc_types.h | 7
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 147
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 29
-rw-r--r--  src/core/hle/kernel/time_manager.h | 5
126 files changed, 4609 insertions, 2975 deletions
diff --git a/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc
index 857b512ba..2a0f2d5f7 100644
--- a/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc
+++ b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
// All architectures must define NumArchitectureDeviceRegions.
constexpr inline const auto NumArchitectureDeviceRegions = 3;
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
new file mode 100644
index 000000000..d02ee61c3
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+constexpr inline PAddr MainMemoryAddress = 0x80000000;
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc
index 58d6c0b16..5f5ec6d5b 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
// All architectures must define NumBoardDeviceRegions.
constexpr inline const auto NumBoardDeviceRegions = 6;
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 6f335c251..c10b7bf30 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -1,10 +1,10 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <random>
#include "common/literals.h"
+#include "common/settings.h"
#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
@@ -28,33 +28,20 @@ namespace {
using namespace Common::Literals;
-u32 GetMemoryModeForInit() {
- return 0x01;
-}
-
u32 GetMemorySizeForInit() {
- return 0;
+ return Settings::values.use_extended_memory_layout ? Smc::MemorySize_6GB : Smc::MemorySize_4GB;
}
Smc::MemoryArrangement GetMemoryArrangeForInit() {
- switch (GetMemoryModeForInit() & 0x3F) {
- case 0x01:
- default:
- return Smc::MemoryArrangement_4GB;
- case 0x02:
- return Smc::MemoryArrangement_4GBForAppletDev;
- case 0x03:
- return Smc::MemoryArrangement_4GBForSystemDev;
- case 0x11:
- return Smc::MemoryArrangement_6GB;
- case 0x12:
- return Smc::MemoryArrangement_6GBForAppletDev;
- case 0x21:
- return Smc::MemoryArrangement_8GB;
- }
+ return Settings::values.use_extended_memory_layout ? Smc::MemoryArrangement_6GB
+ : Smc::MemoryArrangement_4GB;
}
} // namespace
+size_t KSystemControl::Init::GetRealMemorySize() {
+ return GetIntendedMemorySize();
+}
+
// Initialization.
size_t KSystemControl::Init::GetIntendedMemorySize() {
switch (GetMemorySizeForInit()) {
@@ -69,7 +56,13 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
}
PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
- return base_address;
+ const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
+ const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
+ if (intended_dram_size * 2 < real_dram_size) {
+ return base_address;
+ } else {
+ return base_address + ((real_dram_size - intended_dram_size) / 2);
+ }
}
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
@@ -154,9 +147,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
} // Anonymous namespace
u64 KSystemControl::GenerateRandomU64() {
- static std::random_device device;
- static std::mt19937 gen(device());
- static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+ std::random_device device;
+ std::mt19937 gen(device());
+ std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
return distribution(gen);
}
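Note on the GenerateRandomU64 change above: dropping the static locals trades one-time seeding for a freshly seeded generator on every call, avoiding unsynchronized shared state when multiple cores request randomness. A minimal standalone sketch of the new pattern (names here are illustrative, not part of the diff):

#include <cstdint>
#include <limits>
#include <random>

std::uint64_t GenerateRandomU64Sketch() {
    std::random_device device;  // fresh OS entropy on each call
    std::mt19937 gen(device()); // independently seeded generator, no shared state
    std::uniform_int_distribution<std::uint64_t> distribution(
        1, std::numeric_limits<std::uint64_t>::max());
    return distribution(gen);
}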
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 52f230ced..fe375769e 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -13,6 +12,7 @@ public:
class Init {
public:
// Initialization.
+ static std::size_t GetRealMemorySize();
static std::size_t GetIntendedMemorySize();
static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
static bool ShouldIncreaseThreadResourceLimit();
diff --git a/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
index f77a91dec..b0e4123f0 100644
--- a/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
+++ b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/code_set.cpp b/src/core/hle/kernel/code_set.cpp
index 1f434e9af..41386048b 100644
--- a/src/core/hle/kernel/code_set.cpp
+++ b/src/core/hle/kernel/code_set.cpp
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/code_set.h"
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 5cc3b9829..5220dbcb6 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index baad2c5d6..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <mutex>
@@ -42,12 +41,7 @@ void GlobalSchedulerContext::PreemptThreads() {
ASSERT(IsLocked());
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
- kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
-
- // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
- // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
- // interrupts that may have occurred.
- kernel.PhysicalCore(core_id).Interrupt();
+ KScheduler::RotateScheduledQueue(kernel, core_id, priority);
}
}
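The preemption path above now invokes the scheduler's rotation routine as a static function on the kernel rather than through a per-core scheduler instance, and no longer raises the interrupt itself. Conceptually, rotating a scheduled queue at a given priority moves the currently running thread of that priority to the back of its ready list so same-priority peers get CPU time; a simplified sketch of that idea, not yuzu's actual data structures:

#include <array>
#include <cstdint>
#include <deque>

struct Thread;

// One ready list per priority level; rotation demotes the head of the
// chosen level to the tail, giving the next same-priority thread a turn.
constexpr std::size_t NumPriorities = 64;
using ReadyQueues = std::array<std::deque<Thread*>, NumPriorities>;

void RotateScheduledQueueSketch(ReadyQueues& queues, std::uint32_t priority) {
    auto& queue = queues[priority];
    if (queue.size() > 1) {
        queue.push_back(queue.front());
        queue.pop_front();
    }
}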
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 6f44b534f..67bb9852d 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -8,7 +7,6 @@
#include <vector>
#include "common/common_types.h"
-#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_priority_queue.h"
#include "core/hle/kernel/k_scheduler_lock.h"
@@ -80,7 +78,7 @@ private:
/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
- Common::SpinLock global_list_guard{};
+ std::mutex global_list_guard;
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index e19544c54..5b3feec66 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <array>
@@ -17,7 +16,6 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -25,8 +23,15 @@
namespace Kernel {
-SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_)
- : kernel{kernel_}, service_thread{kernel.CreateServiceThread(service_name_)} {}
+SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type)
+ : kernel{kernel_} {
+ if (thread_type == ServiceThreadType::CreateNew) {
+ service_thread = kernel.CreateServiceThread(service_name_);
+ } else {
+ service_thread = kernel.GetDefaultServiceThread();
+ }
+}
SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
@@ -45,7 +50,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
return false;
}
- return DomainHandler(object_id - 1) != nullptr;
+ return !DomainHandler(object_id - 1).expired();
} else {
return session_handler != nullptr;
}
@@ -55,7 +60,7 @@ void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
// Ensure our server session is tracked globally.
- kernel.RegisterServerSession(session);
+ kernel.RegisterServerObject(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
@@ -183,8 +188,8 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
}
-ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf) {
+Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
+ u32_le* src_cmdbuf) {
ParseCommandBuffer(handle_table, src_cmdbuf, true);
if (command_header->IsCloseCommand()) {
@@ -197,7 +202,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTab
return ResultSuccess;
}
-ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
+Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
auto current_offset = handles_offset;
auto& owner_process = *requesting_thread.GetOwnerProcess();
auto& handle_table = owner_process.GetHandleTable();
@@ -282,15 +287,49 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
BufferDescriptorB().size() > buffer_index &&
BufferDescriptorB()[buffer_index].Size() >= size,
{ return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
- memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
+ WriteBufferB(buffer, size, buffer_index);
} else {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorC().size() > buffer_index &&
BufferDescriptorC()[buffer_index].Size() >= size,
{ return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
- memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
+ WriteBufferC(buffer, size, buffer_index);
+ }
+
+ return size;
+}
+
+std::size_t HLERequestContext::WriteBufferB(const void* buffer, std::size_t size,
+ std::size_t buffer_index) const {
+ if (buffer_index >= BufferDescriptorB().size() || size == 0) {
+ return 0;
+ }
+
+ const auto buffer_size{BufferDescriptorB()[buffer_index].Size()};
+ if (size > buffer_size) {
+ LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
+ buffer_size);
+ size = buffer_size; // TODO(bunnei): This needs to be HW tested
+ }
+
+ memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
+ return size;
+}
+
+std::size_t HLERequestContext::WriteBufferC(const void* buffer, std::size_t size,
+ std::size_t buffer_index) const {
+ if (buffer_index >= BufferDescriptorC().size() || size == 0) {
+ return 0;
+ }
+
+ const auto buffer_size{BufferDescriptorC()[buffer_index].Size()};
+ if (size > buffer_size) {
+ LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
+ buffer_size);
+ size = buffer_size; // TODO(bunnei): This needs to be HW tested
}
+ memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
return size;
}
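The new WriteBufferB/WriteBufferC helpers above share the same defensive shape: bail out on a bad index or empty write, clamp oversized writes to the descriptor's advertised capacity (logging loudly, pending hardware verification), then copy. A condensed sketch of that contract, independent of yuzu's Memory interface (the descriptor type here is illustrative):

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

struct BufferDescriptor {
    std::size_t size; // capacity advertised by the IPC descriptor
    void* address;    // destination in emulated memory (a host pointer here)
};

// Returns the number of bytes actually written: 0 on a bad index or empty
// write, otherwise min(size, descriptor capacity).
std::size_t WriteBufferSketch(const std::vector<BufferDescriptor>& descriptors,
                              std::size_t index, const void* data, std::size_t size) {
    if (index >= descriptors.size() || size == 0) {
        return 0;
    }
    const std::size_t clamped = std::min(size, descriptors[index].size);
    std::memcpy(descriptors[index].address, data, clamped);
    return clamped;
}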
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 754b41ff6..99265ce90 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -19,7 +18,7 @@
#include "core/hle/ipc.h"
#include "core/hle/kernel/svc_common.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -33,6 +32,11 @@ namespace Service {
class ServiceFrameworkBase;
}
+enum class ServiceThreadType {
+ Default,
+ CreateNew,
+};
+
namespace Kernel {
class Domain;
@@ -57,7 +61,8 @@ enum class ThreadWakeupReason;
*/
class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
public:
- SessionRequestHandler(KernelCore& kernel, const char* service_name_);
+ SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
+ ServiceThreadType thread_type);
virtual ~SessionRequestHandler();
/**
@@ -66,10 +71,10 @@ public:
* it should be used to differentiate which client (As in ClientSession) we're answering to.
* TODO(Subv): Use a wrapper structure to hold all the information relevant to
* this request (ServerSession, Originator thread, Translated command buffer, etc).
- * @returns ResultCode the result code of the translate operation.
+ * @returns Result the result code of the translate operation.
*/
- virtual ResultCode HandleSyncRequest(Kernel::KServerSession& session,
- Kernel::HLERequestContext& context) = 0;
+ virtual Result HandleSyncRequest(Kernel::KServerSession& session,
+ Kernel::HLERequestContext& context) = 0;
/**
* Signals that a client has just connected to this HLE handler and keeps the
@@ -94,6 +99,7 @@ protected:
std::weak_ptr<ServiceThread> service_thread;
};
+using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;
/**
@@ -135,11 +141,11 @@ public:
if (index < DomainHandlerCount()) {
domain_handlers[index] = nullptr;
} else {
- UNREACHABLE_MSG("Unexpected handler index {}", index);
+ ASSERT_MSG(false, "Unexpected handler index {}", index);
}
}
- SessionRequestHandlerPtr DomainHandler(std::size_t index) const {
+ SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
return domain_handlers.at(index);
}
@@ -206,11 +212,10 @@ public:
}
/// Populates this context with data from the requesting process/thread.
- ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf);
+ Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);
/// Writes data from this context back to the requesting process/thread.
- ResultCode WriteToOutgoingCommandBuffer(KThread& requesting_thread);
+ Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);
u32_le GetHipcCommand() const {
return command;
@@ -272,6 +277,14 @@ public:
std::size_t WriteBuffer(const void* buffer, std::size_t size,
std::size_t buffer_index = 0) const;
+ /// Helper function to write buffer B
+ std::size_t WriteBufferB(const void* buffer, std::size_t size,
+ std::size_t buffer_index = 0) const;
+
+ /// Helper function to write buffer C
+ std::size_t WriteBufferC(const void* buffer, std::size_t size,
+ std::size_t buffer_index = 0) const;
+
/* Helper function to write a buffer using the appropriate buffer descriptor
*
* @tparam T an arbitrary container that satisfies the
@@ -328,10 +341,10 @@ public:
template <typename T>
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
- return std::static_pointer_cast<T>(manager->DomainHandler(index));
+ return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
}
- void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) {
+ void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
manager = std::move(manager_);
}
@@ -374,7 +387,7 @@ private:
u32 handles_offset{};
u32 domain_offset{};
- std::shared_ptr<SessionRequestManager> manager;
+ std::weak_ptr<SessionRequestManager> manager;
KernelCore& kernel;
Core::Memory::Memory& memory;
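Switching the context's manager (and the domain handlers it hands out) from shared_ptr to weak_ptr means an in-flight request no longer keeps the session machinery alive; callers must lock() and cope with an expired pointer, as HasSessionRequestHandler now does. A minimal sketch of the lock-before-use pattern, assuming an illustrative Handler type:

#include <iostream>
#include <memory>

struct Handler {
    void Handle() { std::cout << "handled\n"; }
};

// The request context holds only a weak reference; the session owns the handler.
void DispatchSketch(const std::weak_ptr<Handler>& weak_handler) {
    if (auto handler = weak_handler.lock()) { // promote to shared_ptr if still alive
        handler->Handle();
    } else {
        // The session was torn down mid-flight; drop the request.
    }
}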
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 36fc0944a..9b6b284d0 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -1,25 +1,28 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
+#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
+#include "core/hle/kernel/k_page_buffer.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
+#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
namespace Kernel::Init {
@@ -32,9 +35,13 @@ namespace Kernel::Init {
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
+ HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
+ HANDLER(KThreadLocalPage, \
+ (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
+ ##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
namespace {
@@ -50,38 +57,46 @@ enum KSlabType : u32 {
// Constexpr counts.
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
-constexpr size_t SlabCountKEvent = 700;
+constexpr size_t SlabCountKEvent = 900;
constexpr size_t SlabCountKInterruptEvent = 100;
-constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew.
+constexpr size_t SlabCountKPort = 384;
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
-constexpr size_t SlabCountKSession = 933;
+constexpr size_t SlabCountKSession = 1133;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
-constexpr size_t SlabCountKAlpha = 1;
-constexpr size_t SlabCountKBeta = 6;
+constexpr size_t SlabCountKIoPool = 1;
+constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabCountExtraKThread = 160;
+/// Helper function to translate from the slab virtual address to the reserved location in physical
+/// memory.
+static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
+ slab_addr -= memory_layout.GetSlabRegionAddress();
+ return slab_addr + Core::DramMemoryMap::SlabHeapBase;
+}
+
template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
size_t num_objects) {
- // TODO(bunnei): This is just a place holder. We should initialize the appropriate KSlabHeap for
- // kernel object type T with the backing kernel memory pointer once we emulate kernel memory.
const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
VAddr start = Common::AlignUp(address, alignof(T));
- // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with
- // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free
- // host memory.
- void* backing_kernel_memory{};
+ // This should use the virtual memory address passed in, but currently, we do not setup the
+ // kernel virtual memory layout. Instead, we simply map these at a region of physical memory
+ // that we reserve for the slab heaps.
+ // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
if (size > 0) {
+ void* backing_kernel_memory{
+ system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
+
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
@@ -91,6 +106,12 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
return start + size;
}
+size_t CalculateSlabHeapGapSize() {
+ constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
+ static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
+ return KernelSlabHeapGapSize;
+}
+
} // namespace
KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
@@ -109,8 +130,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KObjectName = SlabCountKObjectName,
.num_KResourceLimit = SlabCountKResourceLimit,
.num_KDebug = SlabCountKDebug,
- .num_KAlpha = SlabCountKAlpha,
- .num_KBeta = SlabCountKBeta,
+ .num_KIoPool = SlabCountKIoPool,
+ .num_KIoRegion = SlabCountKIoRegion,
};
}
@@ -136,11 +157,34 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
#undef ADD_SLAB_SIZE
// Add the reserved size.
- size += KernelSlabHeapGapsSize;
+ size += CalculateSlabHeapGapSize();
return size;
}
+void InitializeKPageBufferSlabHeap(Core::System& system) {
+ auto& kernel = system.Kernel();
+
+ const auto& counts = kernel.SlabResourceCounts();
+ const size_t num_pages =
+ counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+ const size_t slab_size = num_pages * PageSize;
+
+ // Reserve memory from the system resource limit.
+ ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
+
+ // Allocate memory for the slab.
+ constexpr auto AllocateOption = KMemoryManager::EncodeOption(
+ KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
+ const PAddr slab_address =
+ kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+ ASSERT(slab_address != 0);
+
+ // Initialize the slabheap.
+ KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
+ slab_size);
+}
+
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
@@ -160,13 +204,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
// Create an array to represent the gaps between the slabs.
- const size_t total_gap_size = KernelSlabHeapGapsSize;
+ const size_t total_gap_size = CalculateSlabHeapGapSize();
std::array<size_t, slab_types.size()> slab_gaps;
- for (size_t i = 0; i < slab_gaps.size(); i++) {
+ for (auto& slab_gap : slab_gaps) {
// Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange
// is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we
// will include it ourselves.
- slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
+ slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size);
}
// Sort the array, so that we can treat differences between values as offsets to the starts of
@@ -177,13 +221,21 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
}
- for (size_t i = 0; i < slab_types.size(); i++) {
+ // Track the gaps, so that we can free them to the unused slab tree.
+ VAddr gap_start = address;
+ size_t gap_size = 0;
+
+ for (size_t i = 0; i < slab_gaps.size(); i++) {
// Add the random gap to the address.
- address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+ const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+ address += cur_gap;
+ gap_size += cur_gap;
#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
case KSlabType_##NAME: \
- address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
+ if (COUNT > 0) { \
+ address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \
+ } \
break;
// Initialize the slabheap.
@@ -192,7 +244,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
// If we somehow get an invalid type, abort.
default:
- UNREACHABLE();
+ ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
+ }
+
+ // If we've hit the end of a gap, free it.
+ if (gap_start + gap_size != address) {
+ gap_start = address;
+ gap_size = 0;
}
}
}
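With the default counts above (80 KProcess, 800 KThread), the new KPageBuffer slab reserves num_pages = 80 + 800 + (80 + 800) / 8 = 990 pages before the heap is carved out of the System pool. A worked check of that sizing (PageSize assumed to be 4 KiB, as on the Switch):

#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t num_kprocess = 80;  // SlabCountKProcess
    constexpr std::size_t num_kthread = 800;  // SlabCountKThread
    constexpr std::size_t page_size = 4096;   // assumed 4 KiB pages

    const std::size_t num_pages =
        num_kprocess + num_kthread + (num_kprocess + num_kthread) / 8;
    std::printf("pages=%zu bytes=%zu\n", num_pages, num_pages * page_size);
    // Prints: pages=990 bytes=4055040 (i.e. 3960 KiB)
    return 0;
}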
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index a8f7e0918..13be63c87 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -32,12 +31,13 @@ struct KSlabResourceCounts {
size_t num_KObjectName;
size_t num_KResourceLimit;
size_t num_KDebug;
- size_t num_KAlpha;
- size_t num_KBeta;
+ size_t num_KIoPool;
+ size_t num_KIoRegion;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
+void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
new file mode 100644
index 000000000..af0fb23b6
--- /dev/null
+++ b/src/core/hle/kernel/initial_process.h
@@ -0,0 +1,22 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+#include "common/literals.h"
+#include "core/hle/kernel/board/nintendo/nx/k_memory_layout.h"
+#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
+
+namespace Kernel {
+
+using namespace Common::Literals;
+
+constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;
+
+static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
+ return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
+ MainMemoryAddress);
+}
+
+} // namespace Kernel
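Tracing GetInitialProcessBinaryPhysicalAddress through the board code earlier in this diff: GetRealMemorySize() simply forwards to GetIntendedMemorySize(), so in GetKernelPhysicalBaseAddress the carve-out offset (real_dram_size - intended_dram_size) / 2 evaluates to zero and the initial process binary lands at MainMemoryAddress itself. A worked trace under that assumption:

#include <cstdint>

constexpr std::uint64_t base = 0x80000000;  // MainMemoryAddress
constexpr std::uint64_t real = 4ULL << 30;  // real == intended in this emulated setup
constexpr std::uint64_t intended = 4ULL << 30;
// intended * 2 < real is false, so the else branch applies:
constexpr std::uint64_t kernel_base = base + (real - intended) / 2;
static_assert(kernel_base == 0x80000000);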
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 783c69858..f85b11557 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
@@ -49,7 +48,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
}
} else {
// Otherwise, clear our exclusive hold and finish
- monitor.ClearExclusive();
+ monitor.ClearExclusive(current_core);
}
// We're done.
@@ -78,7 +77,7 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
}
} else {
// Otherwise, clear our exclusive hold and finish.
- monitor.ClearExclusive();
+ monitor.ClearExclusive(current_core);
}
// We're done.
@@ -91,8 +90,7 @@ public:
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
if (waiting_thread->IsWaitingForAddressArbiter()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
@@ -109,13 +107,13 @@ private:
} // namespace
-ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
+Result KAddressArbiter::Signal(VAddr addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
- auto it = thread_tree.nfind_light({addr, -1});
+ auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
@@ -132,7 +130,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
@@ -148,7 +146,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
return ResultInvalidState;
}
- auto it = thread_tree.nfind_light({addr, -1});
+ auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
@@ -165,13 +163,13 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
+Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
[[maybe_unused]] const KScopedSchedulerLock sl(kernel);
- auto it = thread_tree.nfind_light({addr, -1});
+ auto it = thread_tree.nfind_key({addr, -1});
// Determine the updated value.
s32 new_value{};
if (count <= 0) {
@@ -233,9 +231,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
return ResultSuccess;
}
-ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
+Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
@@ -286,9 +284,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
return cur_thread->GetWaitResult();
}
-ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
+Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
// Prepare to wait.
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
{
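Both fix-ups above pass the current core to ClearExclusive, matching an exclusive monitor that tracks reservations per core. Stripped of the monitor plumbing, the primitive these helpers implement is an atomic decrement-if-less-than; a lock-free sketch using std::atomic in place of the per-core monitor:

#include <atomic>
#include <cstdint>

// Atomically decrements *target if its value is below `value`.
// Writes the observed value to *out and reports whether it was below.
bool DecrementIfLessThanSketch(std::atomic<std::int32_t>* target,
                               std::int32_t* out, std::int32_t value) {
    std::int32_t current = target->load(std::memory_order_acquire);
    do {
        if (current >= value) {
            *out = current; // not below: leave the value untouched
            return false;
        }
        // compare_exchange re-reads `current` on failure, emulating the
        // load-exclusive/store-exclusive retry loop.
    } while (!target->compare_exchange_weak(current, current - 1,
                                            std::memory_order_acq_rel));
    *out = current;
    return true;
}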
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index bf8b46665..e4085ae22 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -9,7 +8,7 @@
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"
-union ResultCode;
+union Result;
namespace Core {
class System;
@@ -26,8 +25,7 @@ public:
explicit KAddressArbiter(Core::System& system_);
~KAddressArbiter();
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
- s32 count) {
+ [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
@@ -36,12 +34,12 @@ public:
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
}
- UNREACHABLE();
+ ASSERT(false);
return ResultUnknown;
}
- [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
- s64 timeout) {
+ [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
+ s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
@@ -50,16 +48,16 @@ public:
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
}
- UNREACHABLE();
+ ASSERT(false);
return ResultUnknown;
}
private:
- [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
- [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
- [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
- [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+ [[nodiscard]] Result Signal(VAddr addr, s32 count);
+ [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+ [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+ [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
ThreadTree thread_tree;
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index ca29edc88..3e612a207 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
@@ -85,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
}
- UNREACHABLE();
+ ASSERT(false);
return 0;
}
@@ -102,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
ASSERT(IsAllowed39BitType(type));
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
}
- UNREACHABLE();
+ ASSERT(false);
return 0;
}
diff --git a/src/core/hle/kernel/k_address_space_info.h b/src/core/hle/kernel/k_address_space_info.h
index 06f31c6d5..69e9d77f2 100644
--- a/src/core/hle/kernel/k_address_space_info.h
+++ b/src/core/hle/kernel/k_address_space_info.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
index cf704ce87..b58716e90 100644
--- a/src/core/hle/kernel/k_affinity_mask.h
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -1,9 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
index c99a9ebb7..691af8ccb 100644
--- a/src/core/hle/kernel/k_auto_object.cpp
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 05779f2d5..2827763d5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -19,7 +18,7 @@ namespace Kernel {
class KernelCore;
class KProcess;
-#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
\
private: \
friend class ::Kernel::KClassTokenGenerator; \
@@ -41,16 +40,19 @@ public:
static constexpr const char* GetStaticTypeName() { \
return TypeName; \
} \
- virtual TypeObj GetTypeObj() const { \
+ virtual TypeObj GetTypeObj() ATTRIBUTE { \
return GetStaticTypeObj(); \
} \
- virtual const char* GetTypeName() const { \
+ virtual const char* GetTypeName() ATTRIBUTE { \
return GetStaticTypeName(); \
} \
\
private: \
constexpr bool operator!=(const TypeObj& rhs)
+#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+ KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)
+
class KAutoObject {
protected:
class TypeObj {
@@ -83,15 +85,13 @@ protected:
};
private:
- KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
+ KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
RegisterWithKernel();
}
- virtual ~KAutoObject() {
- UnregisterWithKernel();
- }
+ virtual ~KAutoObject() = default;
static KAutoObject* Create(KAutoObject* ptr);
@@ -163,11 +163,12 @@ public:
do {
ASSERT(cur_ref_count > 0);
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1,
- std::memory_order_relaxed));
+ std::memory_order_acq_rel));
// If ref count hits zero, destroy the object.
if (cur_ref_count - 1 == 0) {
this->Destroy();
+ this->UnregisterWithKernel();
}
}
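The Close() path above strengthens the reference-count decrement to memory_order_acq_rel and defers kernel unregistration until after Destroy(): the release half publishes this thread's writes to whichever thread drops the last reference, and the acquire half makes those writes visible to the destroying thread. A minimal intrusive-refcount sketch of that release protocol:

#include <atomic>
#include <cstdint>

struct RefCountedSketch {
    std::atomic<std::uint32_t> ref_count{1};

    void Open() {
        // Relaxed suffices for increments; ordering is enforced on release.
        ref_count.fetch_add(1, std::memory_order_relaxed);
    }

    void Close() {
        // acq_rel: release our writes, acquire everyone else's before Destroy().
        if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            Destroy();
        }
    }

    void Destroy() { /* free resources, then unregister from the container */ }
};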
diff --git a/src/core/hle/kernel/k_auto_object_container.cpp b/src/core/hle/kernel/k_auto_object_container.cpp
index d5f80d5b2..636b3f993 100644
--- a/src/core/hle/kernel/k_auto_object_container.cpp
+++ b/src/core/hle/kernel/k_auto_object_container.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h
index 697cc4289..badd75d2a 100644
--- a/src/core/hle/kernel/k_auto_object_container.h
+++ b/src/core/hle/kernel/k_auto_object_container.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 21e2fe494..cc2a0f7ca 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_class_token.h"
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index 980010150..c9001ae3d 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
@@ -1,11 +1,8 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <atomic>
-
#include "common/bit_util.h"
#include "common/common_types.h"
@@ -52,6 +49,7 @@ private:
}
}
}
+ UNREACHABLE();
}();
template <typename T>
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index ef168fe87..3cb22ff4d 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2021 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
@@ -59,8 +58,8 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
-ResultCode KClientPort::CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager) {
+Result KClientPort::CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::Sessions);
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index 54bb05e20..e17eff28f 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -1,6 +1,5 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -53,8 +52,8 @@ public:
void Destroy() override;
bool IsSignaled() const override;
- ResultCode CreateSession(KClientSession** out,
- std::shared_ptr<SessionRequestManager> session_manager = nullptr);
+ Result CreateSession(KClientSession** out,
+ std::shared_ptr<SessionRequestManager> session_manager = nullptr);
private:
std::atomic<s32> num_sessions{};
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 242582f8f..b2a887b14 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
@@ -22,8 +21,8 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}
-ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
// Signal the server session that new data is available
return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing);
}
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index ad6cc4ed1..0c750d756 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -10,7 +9,7 @@
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -47,8 +46,8 @@ public:
return parent;
}
- ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
void OnServerClosed();
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 0b225e8e0..da57ceb21 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -1,15 +1,13 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/device_memory.h"
-#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -21,7 +19,7 @@ namespace Kernel {
KCodeMemory::KCodeMemory(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
-ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
// Set members.
m_owner = kernel.CurrentProcess();
@@ -29,10 +27,10 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
auto& page_table = m_owner->PageTable();
// Construct the page group.
- m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
+ m_page_group = {};
// Lock the memory.
- R_TRY(page_table.LockForCodeMemory(addr, size))
+ R_TRY(page_table.LockForCodeMemory(&m_page_group, addr, size))
// Clear the memory.
for (const auto& block : m_page_group.Nodes()) {
@@ -40,6 +38,7 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
}
// Set remaining tracking members.
+ m_owner->Open();
m_address = addr;
m_is_initialized = true;
m_is_owner_mapped = false;
@@ -53,11 +52,17 @@ void KCodeMemory::Finalize() {
// Unlock.
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = m_page_group.GetNumPages() * PageSize;
- m_owner->PageTable().UnlockForCodeMemory(m_address, size);
+ m_owner->PageTable().UnlockForCodeMemory(m_address, size, m_page_group);
}
+
+ // Close the page group.
+ m_page_group = {};
+
+ // Close our reference to our owner.
+ m_owner->Close();
}
-ResultCode KCodeMemory::Map(VAddr address, size_t size) {
+Result KCodeMemory::Map(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -77,7 +82,7 @@ ResultCode KCodeMemory::Map(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
+Result KCodeMemory::Unmap(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -94,7 +99,7 @@ ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
return ResultSuccess;
}
-ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
@@ -114,7 +119,8 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
k_perm = KMemoryPermission::UserReadExecute;
break;
default:
- break;
+        // Already validated by the ControlCodeMemory SVC.
+ UNREACHABLE();
}
// Map the memory.
@@ -127,7 +133,7 @@ ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermis
return ResultSuccess;
}
-ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
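
Two behavioral changes land here besides the Result rename: LockForCodeMemory now receives the page group as an out parameter and fills it while locking, and KCodeMemory now holds a reference on its owner for its whole lifetime (Open in Initialize, Close in Finalize). A minimal standalone sketch of that reference-holding pattern, using placeholder types rather than the yuzu classes:

    #include <cassert>

    // Placeholder stand-ins for KAutoObject-style reference counting.
    struct Owner {
        int ref_count = 1;
        void Open() { ++ref_count; }               // take a reference
        void Close() { assert(--ref_count >= 0); } // drop a reference
    };

    struct CodeMemory {
        Owner* owner = nullptr;
        void Initialize(Owner* o) {
            owner = o;
            owner->Open(); // pin the owner while we reference its page table
        }
        void Finalize() {
            owner->Close(); // matching close, as added to KCodeMemory::Finalize
        }
    };
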
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index e0ba19a53..2e7e1436a 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -8,7 +7,7 @@
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
@@ -30,20 +29,20 @@ class KCodeMemory final
public:
explicit KCodeMemory(KernelCore& kernel_);
- ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
- void Finalize();
+ Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+ void Finalize() override;
- ResultCode Map(VAddr address, size_t size);
- ResultCode Unmap(VAddr address, size_t size);
- ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
- ResultCode UnmapFromOwner(VAddr address, size_t size);
+ Result Map(VAddr address, size_t size);
+ Result Unmap(VAddr address, size_t size);
+ Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
+ Result UnmapFromOwner(VAddr address, size_t size);
- bool IsInitialized() const {
+ bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
- KProcess* GetOwner() const {
+ KProcess* GetOwner() const override {
return m_owner;
}
VAddr GetSourceAddress() const {
@@ -54,7 +53,7 @@ public:
}
private:
- KPageLinkedList m_page_group{};
+ KPageGroup m_page_group{};
KProcess* m_owner{};
VAddr m_address{};
KLightLock m_lock;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index aadcc297a..124149697 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
@@ -9,7 +8,6 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
@@ -63,8 +61,7 @@ public:
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
: KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
@@ -82,8 +79,7 @@ public:
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
@@ -107,8 +103,8 @@ KConditionVariable::KConditionVariable(Core::System& system_)
KConditionVariable::~KConditionVariable() = default;
-ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
- KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::SignalToAddress(VAddr addr) {
+ KThread* owner_thread = GetCurrentThreadPointer(kernel);
// Signal the address.
{
@@ -128,7 +124,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
// Write the value to userspace.
- ResultCode result{ResultSuccess};
+ Result result{ResultSuccess};
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
@@ -148,8 +144,8 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
}
}
-ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
- KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
+Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
+ KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
// Wait for the address.
@@ -244,7 +240,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
{
KScopedSchedulerLock sl(kernel);
- auto it = thread_tree.nfind_light({cv_key, -1});
+ auto it = thread_tree.nfind_key({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);
@@ -263,7 +259,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}
}
-ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
+Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
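
Signal now walks the tree via nfind_key and wakes at most count waiters, with count <= 0 meaning "wake all". The same traversal shape with a std::multimap standing in for the intrusive thread tree (a sketch under that substitution, not the kernel's data structure):

    #include <cstdint>
    #include <map>
    #include <vector>

    // Wake up to `count` waiters registered under `key`; count <= 0 wakes all.
    std::vector<int> Signal(std::multimap<std::uint64_t, int>& waiters,
                            std::uint64_t key, std::int32_t count) {
        std::vector<int> woken;
        auto it = waiters.lower_bound(key); // plays the role of nfind_key({key, -1})
        while (it != waiters.end() && it->first == key &&
               (count <= 0 || static_cast<std::int32_t>(woken.size()) < count)) {
            woken.push_back(it->second);
            it = waiters.erase(it);
        }
        return woken;
    }
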
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 5e4815d08..fad4ed011 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -26,12 +25,12 @@ public:
~KConditionVariable();
// Arbitration
- [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
- [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
+ [[nodiscard]] Result SignalToAddress(VAddr addr);
+ [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
// Condition variable
void Signal(u64 cv_key, s32 count);
- [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+ [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
private:
void SignalImpl(KThread* thread);
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index 0720efece..e52fafbc7 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
@@ -14,7 +13,7 @@ KEvent::KEvent(KernelCore& kernel_)
KEvent::~KEvent() = default;
-void KEvent::Initialize(std::string&& name_) {
+void KEvent::Initialize(std::string&& name_, KProcess* owner_) {
// Increment reference count.
// Because reference count is one on creation, this will result
// in a reference count of two. Thus, when both readable and
@@ -30,10 +29,8 @@ void KEvent::Initialize(std::string&& name_) {
writable_event.Initialize(this, name_ + ":Writable");
// Set our owner process.
- owner = kernel.CurrentProcess();
- if (owner) {
- owner->Open();
- }
+ owner = owner_;
+ owner->Open();
// Mark initialized.
name = std::move(name_);
@@ -47,10 +44,8 @@ void KEvent::Finalize() {
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
- if (owner) {
- owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
- owner->Close();
- }
+ owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
+ owner->Close();
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 3d3ec99e2..2ff828feb 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -22,7 +21,7 @@ public:
explicit KEvent(KernelCore& kernel_);
~KEvent() override;
- void Initialize(std::string&& name);
+ void Initialize(std::string&& name, KProcess* owner_);
void Finalize() override;
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index cf95f0852..e830ca46e 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_handle_table.h"
@@ -9,7 +8,7 @@ namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
-ResultCode KHandleTable::Finalize() {
+Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
@@ -63,7 +62,7 @@ bool KHandleTable::Remove(Handle handle) {
return true;
}
-ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -75,7 +74,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
- m_entry_infos[index].info = {.linear_id = linear_id, .type = type};
+ m_entry_infos[index].linear_id = linear_id;
m_objects[index] = obj;
obj->Open();
@@ -86,7 +85,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
return ResultSuccess;
}
-ResultCode KHandleTable::Reserve(Handle* out_handle) {
+Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -116,7 +115,7 @@ void KHandleTable::Unreserve(Handle handle) {
}
}
-void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+void KHandleTable::Register(Handle handle, KAutoObject* obj) {
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
@@ -132,7 +131,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
// Set the entry.
ASSERT(m_objects[index] == nullptr);
- m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type};
+ m_entry_infos[index].linear_id = static_cast<u16>(linear_id);
m_objects[index] = obj;
obj->Open();
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 87004a0f9..0864a737c 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -31,7 +30,7 @@ public:
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
- ResultCode Initialize(s32 size) {
+ Result Initialize(s32 size) {
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Initialize all fields.
@@ -42,7 +41,7 @@ public:
m_free_head_index = -1;
// Free all entries.
- for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
+ for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_free_head_index = i;
@@ -61,7 +60,7 @@ public:
return m_max_count;
}
- ResultCode Finalize();
+ Result Finalize();
bool Remove(Handle handle);
template <typename T = KAutoObject>
@@ -101,20 +100,11 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
- ResultCode Reserve(Handle* out_handle);
+ Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
- template <typename T>
- ResultCode Add(Handle* out_handle, T* obj) {
- static_assert(std::is_base_of_v<KAutoObject, T>);
- return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
- }
-
- template <typename T>
- void Register(Handle handle, T* obj) {
- static_assert(std::is_base_of_v<KAutoObject, T>);
- return this->Register(handle, obj, obj->GetTypeObj().GetClassToken());
- }
+ Result Add(Handle* out_handle, KAutoObject* obj);
+ void Register(Handle handle, KAutoObject* obj);
template <typename T>
bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const {
@@ -160,9 +150,6 @@ public:
}
private:
- ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
- void Register(Handle handle, KAutoObject* obj, u16 type);
-
s32 AllocateEntry() {
ASSERT(m_count < m_table_size);
@@ -179,7 +166,7 @@ private:
ASSERT(m_count > 0);
m_objects[index] = nullptr;
- m_entry_infos[index].next_free_index = m_free_head_index;
+ m_entry_infos[index].next_free_index = static_cast<s16>(m_free_head_index);
m_free_head_index = index;
@@ -278,19 +265,13 @@ private:
}
union EntryInfo {
- struct {
- u16 linear_id;
- u16 type;
- } info;
- s32 next_free_index;
+ u16 linear_id;
+ s16 next_free_index;
constexpr u16 GetLinearId() const {
- return info.linear_id;
- }
- constexpr u16 GetType() const {
- return info.type;
+ return linear_id;
}
- constexpr s32 GetNextFreeIndex() const {
+ constexpr s16 GetNextFreeIndex() const {
return next_free_index;
}
};
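
The new EntryInfo drops the per-entry type token (type checks now go through the object's class token directly, hence the removal of the templated Add/Register forwarders above) and overlaps the 16-bit linear id with a 16-bit free-list link, shrinking each entry from four bytes to two. The union trick in isolation (illustrative code, not the yuzu declarations):

    #include <cstdint>

    // Occupied slots hold a linear id; free slots reuse the same two bytes
    // as the index of the next free slot, as in the new EntryInfo.
    union EntryInfo {
        std::uint16_t linear_id;
        std::int16_t next_free_index;
    };
    static_assert(sizeof(EntryInfo) == 2); // previously 4 with {u16, u16} + s32

    int main() {
        EntryInfo entries[8]{};
        std::int16_t free_head = -1;
        // Thread the free list through the union, matching Initialize().
        for (std::int16_t i = 0; i < 8; ++i) {
            entries[i].next_free_index = static_cast<std::int16_t>(i - 1);
            free_head = i;
        }
        // Allocate: pop the head; the slot is then free to hold a linear id.
        const std::int16_t index = free_head;
        free_head = entries[index].next_free_index;
        entries[index].linear_id = 1;
        return free_head == 6 ? 0 : 1;
    }
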
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index e5dd39751..1b577a5b3 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -1,12 +1,12 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
namespace Kernel::KInterruptManager {
@@ -16,8 +16,10 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
- auto& scheduler = kernel.Scheduler(core_id);
- auto& current_thread = *scheduler.GetCurrentThread();
+ // Acknowledge the interrupt.
+ kernel.PhysicalCore(core_id).ClearInterrupt();
+
+ auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
@@ -27,8 +29,11 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
- scheduler.GetCurrentThread()->SetInterruptFlag();
+ GetCurrentThread(kernel).SetInterruptFlag();
}
+
+ // Request interrupt scheduling.
+ kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
} // namespace Kernel::KInterruptManager
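
The rewritten handler acknowledges the interrupt on the physical core before inspecting the current thread, and now always requests interrupt scheduling at the end rather than relying on the outer interrupt path. A control-flow sketch with stub types (the names below are placeholders, not the KernelCore API):

    // Stub types; the real calls are KPhysicalCore/KScheduler members.
    struct Core { void ClearInterrupt() {} };
    struct Thread {
        int user_disable_count = 0;
        bool pinned = false;
        void SetInterruptFlag() {}
    };
    struct Scheduler { void RequestScheduleOnInterrupt() {} };

    void HandleInterrupt(Core& core, Thread& thread, Scheduler& scheduler) {
        core.ClearInterrupt(); // 1. acknowledge the interrupt first
        if (thread.user_disable_count != 0 && !thread.pinned) {
            thread.pinned = true; // 2. pin the current thread if required
            thread.SetInterruptFlag();
        }
        scheduler.RequestScheduleOnInterrupt(); // 3. always request a reschedule
    }
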
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
index 05924801e..f103dfe3f 100644
--- a/src/core/hle/kernel/k_interrupt_manager.h
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index a8001fffc..cade99cfd 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
@@ -18,8 +17,7 @@ public:
bool term)
: KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Only process waits if we're allowed to.
if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
return;
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index 5d6d7f128..3cabd6b4f 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 4620342eb..43185320d 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_scheduler.h"
@@ -16,8 +15,7 @@ class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index 4163b8a85..7edd950c0 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h
index 6adfe1e34..78859ced3 100644
--- a/src/core/hle/kernel/k_linked_list.h
+++ b/src/core/hle/kernel/k_linked_list.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index dcca47f1b..18df1f836 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index fc7033564..3ddb9984f 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index d222da919..e14741b89 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
index af652af58..098ba6eac 100644
--- a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
+++ b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/literals.h"
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index fb1e2435f..55dc296d0 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 57ff538cc..884fc623a 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -57,11 +56,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor
constexpr std::size_t KernelInitialPageHeapSize = 128_KiB;
constexpr std::size_t KernelSlabHeapDataSize = 5_MiB;
-constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB;
-constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
+constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB;
+constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
-constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB;
+constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr std::size_t KernelResourceSize =
KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
@@ -173,6 +172,10 @@ public:
return Dereference(FindVirtualLinear(address));
}
+ const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const {
+ return Dereference(FindPhysicalLinear(address));
+ }
+
const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const {
return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer);
}
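
The 416_KiB → 0x68000 change above is purely notational: 416 × 1024 = 425984 = 0x68000, so KernelSlabHeapAdditionalSize keeps its exact value and only its spelling changes. A compile-time check of the arithmetic:

    static_assert(0x68000 == 416 * 1024, "KernelSlabHeapAdditionalSize is unchanged");
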
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 1b44541b1..5b0a9963a 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
@@ -10,189 +9,411 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/device_memory.h"
+#include "core/hle/kernel/initial_process.h"
#include "core/hle/kernel/k_memory_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
-KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {}
+namespace {
+
+constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
+ if ((type | KMemoryRegionType_DramApplicationPool) == type) {
+ return KMemoryManager::Pool::Application;
+ } else if ((type | KMemoryRegionType_DramAppletPool) == type) {
+ return KMemoryManager::Pool::Applet;
+ } else if ((type | KMemoryRegionType_DramSystemPool) == type) {
+ return KMemoryManager::Pool::System;
+ } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
+ return KMemoryManager::Pool::SystemNonSecure;
+ } else {
+ ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
+ return {};
+ }
+}
-std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
- const auto size{end_address - start_address};
+} // namespace
+
+KMemoryManager::KMemoryManager(Core::System& system_)
+ : system{system_}, pool_locks{
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ KLightLock{system_.Kernel()},
+ } {}
+
+void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
+
+    // Determine the extents of the management region.
+ const VAddr management_region_end = management_region + management_region_size;
+
+ // Reset our manager count.
+ num_managers = 0;
+
+    // Traverse the physical memory region tree, initializing each manager as appropriate.
+ while (num_managers != MaxManagerCount) {
+ // Locate the region that should initialize the current manager.
+ PAddr region_address = 0;
+ size_t region_size = 0;
+ Pool region_pool = Pool::Count;
+ for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ // We only care about regions that we need to create managers for.
+ if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+ continue;
+ }
- // Calculate metadata sizes
- const auto ref_count_size{(size / PageSize) * sizeof(u16)};
- const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)};
- const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
- const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)};
- const auto total_metadata_size{manager_size + page_heap_size};
- ASSERT(manager_size <= total_metadata_size);
- ASSERT(Common::IsAligned(total_metadata_size, PageSize));
+ // We want to initialize the managers in order.
+ if (it.GetAttributes() != num_managers) {
+ continue;
+ }
- // Setup region
- pool = new_pool;
+ const PAddr cur_start = it.GetAddress();
+ const PAddr cur_end = it.GetEndAddress();
+
+ // Validate the region.
+ ASSERT(cur_end != 0);
+ ASSERT(cur_start != 0);
+ ASSERT(it.GetSize() > 0);
+
+ // Update the region's extents.
+ if (region_address == 0) {
+ region_address = cur_start;
+ region_size = it.GetSize();
+ region_pool = GetPoolFromMemoryRegionType(it.GetType());
+ } else {
+ ASSERT(cur_start == region_address + region_size);
+
+ // Update the size.
+ region_size = cur_end - region_address;
+ ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
+ }
+ }
+
+ // If we didn't find a region, we're done.
+ if (region_size == 0) {
+ break;
+ }
- // Initialize the manager's KPageHeap
- heap.Initialize(start_address, size, page_heap_size);
+ // Initialize a new manager for the region.
+ Impl* manager = std::addressof(managers[num_managers++]);
+ ASSERT(num_managers <= managers.size());
+
+ const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
+ management_region_end, region_pool);
+ management_region += cur_size;
+ ASSERT(management_region <= management_region_end);
+
+ // Insert the manager into the pool list.
+ const auto region_pool_index = static_cast<u32>(region_pool);
+ if (pool_managers_tail[region_pool_index] == nullptr) {
+ pool_managers_head[region_pool_index] = manager;
+ } else {
+ pool_managers_tail[region_pool_index]->SetNext(manager);
+ manager->SetPrev(pool_managers_tail[region_pool_index]);
+ }
+ pool_managers_tail[region_pool_index] = manager;
+ }
- // Free the memory to the heap
- heap.Free(start_address, size / PageSize);
+ // Free each region to its corresponding heap.
+ size_t reserved_sizes[MaxManagerCount] = {};
+ const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
+ const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
+ const PAddr ini_last = ini_end - 1;
+ for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+ if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+ // Get the manager for the region.
+ auto index = it.GetAttributes();
+ auto& manager = managers[index];
+
+ const PAddr cur_start = it.GetAddress();
+ const PAddr cur_last = it.GetLastAddress();
+ const PAddr cur_end = it.GetEndAddress();
+
+ if (cur_start <= ini_start && ini_last <= cur_last) {
+ // Free memory before the ini to the heap.
+ if (cur_start != ini_start) {
+ manager.Free(cur_start, (ini_start - cur_start) / PageSize);
+ }
- // Update the heap's used size
- heap.UpdateUsedSize();
+ // Open/reserve the ini memory.
+ manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
+ reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
- return total_metadata_size;
-}
+ // Free memory after the ini to the heap.
+ if (ini_last != cur_last) {
+ ASSERT(cur_end != 0);
+                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
+ }
+ } else {
+ // Ensure there's no partial overlap with the ini image.
+ if (cur_start <= ini_last) {
+ ASSERT(cur_last < ini_start);
+ } else {
+ // Otherwise, check the region for general validity.
+ ASSERT(cur_end != 0);
+ }
-void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
- ASSERT(pool < Pool::Count);
- managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
+ // Free the memory to the heap.
+ manager.Free(cur_start, it.GetSize() / PageSize);
+ }
+ }
+ }
+
+ // Update the used size for all managers.
+ for (size_t i = 0; i < num_managers; ++i) {
+ managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+ }
}
-VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages,
- u32 option) {
- // Early return if we're allocating no pages
+PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+ // Early return if we're allocating no pages.
if (num_pages == 0) {
- return {};
+ return 0;
}
- // Lock the pool that we're allocating from
+ // Lock the pool that we're allocating from.
const auto [pool, dir] = DecodeOption(option);
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
-
- // Choose a heap based on our page size request
- const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
-
- // Loop, trying to iterate from each block
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
- VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)};
+    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+
+ // Choose a heap based on our page size request.
+ const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
+
+ // Loop, trying to iterate from each block.
+ Impl* chosen_manager = nullptr;
+ PAddr allocated_block = 0;
+ for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
+ chosen_manager = this->GetNextManager(chosen_manager, dir)) {
+ allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+ if (allocated_block != 0) {
+ break;
+ }
+ }
- // If we failed to allocate, quit now
- if (!allocated_block) {
- return {};
+ // If we failed to allocate, quit now.
+ if (allocated_block == 0) {
+ return 0;
}
- // If we allocated more than we need, free some
- const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)};
+ // If we allocated more than we need, free some.
+ const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
if (allocated_pages > num_pages) {
- chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+ chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
}
+ // Open the first reference to the pages.
+ chosen_manager->OpenFirst(allocated_block, num_pages);
+
return allocated_block;
}
-ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir, u32 heap_fill_value) {
- ASSERT(page_list.GetNumPages() == 0);
+Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
+ Direction dir, bool random) {
+ // Choose a heap based on our page size request.
+ const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
+ R_UNLESS(0 <= heap_index, ResultOutOfMemory);
+
+ // Ensure that we don't leave anything un-freed.
+ auto group_guard = SCOPE_GUARD({
+ for (const auto& it : out->Nodes()) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
+ const size_t num_pages_to_free =
+ std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
+ manager.Free(it.GetAddress(), num_pages_to_free);
+ }
+ });
- // Early return if we're allocating no pages
- if (num_pages == 0) {
- return ResultSuccess;
- }
+ // Keep allocating until we've allocated all our pages.
+ for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+ const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
+ for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr;
+ cur_manager = this->GetNextManager(cur_manager, dir)) {
+ while (num_pages >= pages_per_alloc) {
+ // Allocate a block.
+ PAddr allocated_block = cur_manager->AllocateBlock(index, random);
+ if (allocated_block == 0) {
+ break;
+ }
- // Lock the pool that we're allocating from
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
+ // Safely add it to our group.
+ {
+ auto block_guard =
+ SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
+ R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+ block_guard.Cancel();
+ }
- // Choose a heap based on our page size request
- const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)};
- if (heap_index < 0) {
- return ResultOutOfMemory;
+ num_pages -= pages_per_alloc;
+ }
+ }
}
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
+ // Only succeed if we allocated as many pages as we wanted.
+ R_UNLESS(num_pages == 0, ResultOutOfMemory);
- // Ensure that we don't leave anything un-freed
- auto group_guard = detail::ScopeExit([&] {
- for (const auto& it : page_list.Nodes()) {
- const auto min_num_pages{std::min<size_t>(
- it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
- chosen_manager.Free(it.GetAddress(), min_num_pages);
- }
- });
+ // We succeeded!
+ group_guard.Cancel();
+ return ResultSuccess;
+}
- // Keep allocating until we've allocated all our pages
- for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
- const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)};
+Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
+ ASSERT(out != nullptr);
+ ASSERT(out->GetNumPages() == 0);
- while (num_pages >= pages_per_alloc) {
- // Allocate a block
- VAddr allocated_block{chosen_manager.AllocateBlock(index, false)};
- if (!allocated_block) {
- break;
- }
+ // Early return if we're allocating no pages.
+ R_SUCCEED_IF(num_pages == 0);
- // Safely add it to our group
- {
- auto block_guard = detail::ScopeExit(
- [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
+ // Lock the pool that we're allocating from.
+ const auto [pool, dir] = DecodeOption(option);
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+
+ // Allocate the page group.
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+
+ // Open the first reference to the pages.
+ for (const auto& block : out->Nodes()) {
+ PAddr cur_address = block.GetAddress();
+ size_t remaining_pages = block.GetNumPages();
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+
+ // Process part or all of the block.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ manager.OpenFirst(cur_address, cur_pages);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
- if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
- result.IsError()) {
- return result;
- }
+ return ResultSuccess;
+}
- block_guard.Cancel();
- }
+Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
+ u64 process_id, u8 fill_pattern) {
+ ASSERT(out != nullptr);
+ ASSERT(out->GetNumPages() == 0);
- num_pages -= pages_per_alloc;
- }
- }
+ // Decode the option.
+ const auto [pool, dir] = DecodeOption(option);
- // Clear allocated memory.
- for (const auto& it : page_list.Nodes()) {
- std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
- it.GetSize());
+ // Allocate the memory.
+ {
+ // Lock the pool that we're allocating from.
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+
+ // Allocate the page group.
+ R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+
+ // Open the first reference to the pages.
+ for (const auto& block : out->Nodes()) {
+ PAddr cur_address = block.GetAddress();
+ size_t remaining_pages = block.GetNumPages();
+ while (remaining_pages > 0) {
+ // Get the manager for the current address.
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+
+ // Process part or all of the block.
+ const size_t cur_pages =
+ std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+ manager.OpenFirst(cur_address, cur_pages);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ remaining_pages -= cur_pages;
+ }
+ }
}
- // Only succeed if we allocated as many pages as we wanted
- if (num_pages) {
- return ResultOutOfMemory;
+ // Set all the allocated memory.
+ for (const auto& block : out->Nodes()) {
+ std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
+ block.GetSize());
}
- // We succeeded!
- group_guard.Cancel();
-
return ResultSuccess;
}
-ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool,
- Direction dir, u32 heap_fill_value) {
- // Early return if we're freeing no pages
- if (!num_pages) {
- return ResultSuccess;
+void KMemoryManager::Open(PAddr address, size_t num_pages) {
+ // Repeatedly open references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+ {
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Open(address, cur_pages);
+ }
+
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
}
+}
- // Lock the pool that we're freeing from
- const auto pool_index{static_cast<std::size_t>(pool)};
- std::lock_guard lock{pool_locks[pool_index]};
+void KMemoryManager::Close(PAddr address, size_t num_pages) {
+ // Repeatedly close references until we've done so for all pages.
+ while (num_pages) {
+ auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
+ const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
- // TODO (bunnei): Support multiple managers
- Impl& chosen_manager{managers[pool_index]};
+ {
+ KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
+ manager.Close(address, cur_pages);
+ }
- // Free all of the pages
- for (const auto& it : page_list.Nodes()) {
- const auto min_num_pages{std::min<size_t>(
- it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
- chosen_manager.Free(it.GetAddress(), min_num_pages);
+ num_pages -= cur_pages;
+ address += cur_pages * PageSize;
}
+}
- return ResultSuccess;
+void KMemoryManager::Close(const KPageGroup& pg) {
+ for (const auto& node : pg.Nodes()) {
+ Close(node.GetAddress(), node.GetNumPages());
+ }
+}
+void KMemoryManager::Open(const KPageGroup& pg) {
+ for (const auto& node : pg.Nodes()) {
+ Open(node.GetAddress(), node.GetNumPages());
+ }
+}
+
+size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
+ VAddr management_end, Pool p) {
+ // Calculate management sizes.
+ const size_t ref_count_size = (size / PageSize) * sizeof(u16);
+ const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
+ const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
+ const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size);
+ const size_t total_management_size = manager_size + page_heap_size;
+ ASSERT(manager_size <= total_management_size);
+ ASSERT(management + total_management_size <= management_end);
+ ASSERT(Common::IsAligned(total_management_size, PageSize));
+
+ // Setup region.
+ pool = p;
+ management_region = management;
+ page_reference_counts.resize(
+ Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
+ ASSERT(Common::IsAligned(management_region, PageSize));
+
+ // Initialize the manager's KPageHeap.
+ heap.Initialize(address, size, management + manager_size, page_heap_size);
+
+ return total_management_size;
}
-std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) {
- const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
- const std::size_t optimize_map_size =
+size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
+ const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
+ const size_t optimize_map_size =
(Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
- const std::size_t manager_meta_size =
- Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
- const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
+ const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize);
+ const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
return manager_meta_size + page_heap_size;
}
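
GetPoolFromMemoryRegionType above leans on the identity that (type | flags) == type holds exactly when every bit of flags is already set in type, i.e. the same containment test as (type & flags) == flags. A standalone check of the idiom (the flag values below are made up for illustration; the real bits live in k_memory_region_type.h):

    #include <cstdint>

    constexpr bool ContainsAllBits(std::uint32_t type, std::uint32_t flags) {
        return (type | flags) == type; // same as (type & flags) == flags
    }

    static_assert(ContainsAllBits(0xFF, 0x7C));  // all of 0x7C present in 0xFF
    static_assert(!ContainsAllBits(0x70, 0x7C)); // 0x0C missing from 0x70
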
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 17c7690f1..dcb9b6348 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -1,15 +1,15 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
-#include <mutex>
#include <tuple>
#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_page_heap.h"
#include "core/hle/result.h"
@@ -19,7 +19,7 @@ class System;
namespace Kernel {
-class KPageLinkedList;
+class KPageGroup;
class KMemoryManager final {
public:
@@ -52,22 +52,33 @@ public:
explicit KMemoryManager(Core::System& system_);
- constexpr std::size_t GetSize(Pool pool) const {
- return managers[static_cast<std::size_t>(pool)].GetSize();
+ void Initialize(VAddr management_region, size_t management_region_size);
+
+ constexpr size_t GetSize(Pool pool) const {
+ constexpr Direction GetSizeDirection = Direction::FromFront;
+ size_t total = 0;
+ for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+ manager = this->GetNextManager(manager, GetSizeDirection)) {
+ total += manager->GetSize();
+ }
+ return total;
}
- void InitializeManager(Pool pool, u64 start_address, u64 end_address);
+ PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+ Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+ Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+ u8 fill_pattern);
+
+ static constexpr size_t MaxManagerCount = 10;
- VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
- ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
- u32 heap_fill_value = 0);
- ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir,
- u32 heap_fill_value = 0);
+ void Close(PAddr address, size_t num_pages);
+ void Close(const KPageGroup& pg);
- static constexpr std::size_t MaxManagerCount = 10;
+ void Open(PAddr address, size_t num_pages);
+ void Open(const KPageGroup& pg);
public:
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size) {
+ static size_t CalculateManagementOverheadSize(size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);
}
@@ -100,17 +111,26 @@ private:
Impl() = default;
~Impl() = default;
- std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
+ size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
+ Pool p);
VAddr AllocateBlock(s32 index, bool random) {
return heap.AllocateBlock(index, random);
}
- void Free(VAddr addr, std::size_t num_pages) {
+ void Free(VAddr addr, size_t num_pages) {
heap.Free(addr, num_pages);
}
- constexpr std::size_t GetSize() const {
+ void SetInitialUsedHeapSize(size_t reserved_size) {
+ heap.SetInitialUsedSize(reserved_size);
+ }
+
+ constexpr Pool GetPool() const {
+ return pool;
+ }
+
+ constexpr size_t GetSize() const {
return heap.GetSize();
}
@@ -122,10 +142,88 @@ private:
return heap.GetEndAddress();
}
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+ constexpr size_t GetPageOffset(PAddr address) const {
+ return heap.GetPageOffset(address);
+ }
+
+ constexpr size_t GetPageOffsetToEnd(PAddr address) const {
+ return heap.GetPageOffsetToEnd(address);
+ }
+
+ constexpr void SetNext(Impl* n) {
+ next = n;
+ }
+
+ constexpr void SetPrev(Impl* n) {
+ prev = n;
+ }
+
+ constexpr Impl* GetNext() const {
+ return next;
+ }
+
+ constexpr Impl* GetPrev() const {
+ return prev;
+ }
+
+ void OpenFirst(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+ while (index < end) {
+ const RefCount ref_count = (++page_reference_counts[index]);
+ ASSERT(ref_count == 1);
- static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
- std::size_t region_size) {
+ index++;
+ }
+ }
+
+ void Open(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+ while (index < end) {
+ const RefCount ref_count = (++page_reference_counts[index]);
+ ASSERT(ref_count > 1);
+
+ index++;
+ }
+ }
+
+ void Close(PAddr address, size_t num_pages) {
+ size_t index = this->GetPageOffset(address);
+ const size_t end = index + num_pages;
+
+ size_t free_start = 0;
+ size_t free_count = 0;
+ while (index < end) {
+ ASSERT(page_reference_counts[index] > 0);
+ const RefCount ref_count = (--page_reference_counts[index]);
+
+ // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
+ if (ref_count == 0) {
+ if (free_count > 0) {
+ free_count++;
+ } else {
+ free_start = index;
+ free_count = 1;
+ }
+ } else {
+ if (free_count > 0) {
+ this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ free_count = 0;
+ }
+ }
+
+ index++;
+ }
+
+ if (free_count > 0) {
+ this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+ }
+ }
+
+ static size_t CalculateManagementOverheadSize(size_t region_size);
+
+ static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
Common::BitSize<u64>()) *
sizeof(u64);
@@ -135,13 +233,45 @@ private:
using RefCount = u16;
KPageHeap heap;
+ std::vector<RefCount> page_reference_counts;
+ VAddr management_region{};
Pool pool{};
+ Impl* next{};
+ Impl* prev{};
};
private:
+ Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
+ return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ }
+
+ const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
+ return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+ }
+
+ constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
+ return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
+ : pool_managers_head[static_cast<size_t>(pool)];
+ }
+
+ constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+ if (dir == Direction::FromBack) {
+ return cur->GetPrev();
+ } else {
+ return cur->GetNext();
+ }
+ }
+
+ Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
+ bool random);
+
+private:
Core::System& system;
- std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
+ std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
+ std::array<Impl*, MaxManagerCount> pool_managers_head{};
+ std::array<Impl*, MaxManagerCount> pool_managers_tail{};
std::array<Impl, MaxManagerCount> managers;
+ size_t num_managers{};
};
} // namespace Kernel
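
Impl::Close above batches its frees: it tracks the current run of pages whose refcount just reached zero and flushes each maximal run with a single Free call. The same run-length idea in isolation, with std::vector standing in for the manager's refcount array (a sketch, not the yuzu member):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Decrement refcounts over [first, first + count) and report each maximal
    // run that reached zero via one callback, mirroring Impl::Close.
    template <typename FreeFn>
    void CloseRange(std::vector<std::uint16_t>& refs, std::size_t first,
                    std::size_t count, FreeFn&& free_run) {
        std::size_t free_start = 0;
        std::size_t free_count = 0;
        for (std::size_t i = first; i < first + count; ++i) {
            if (--refs[i] == 0) {
                if (free_count == 0) {
                    free_start = i; // a new freeable run begins here
                }
                ++free_count;
            } else if (free_count > 0) {
                free_run(free_start, free_count); // flush the finished run
                free_count = 0;
            }
        }
        if (free_count > 0) {
            free_run(free_start, free_count); // flush the trailing run
        }
    }
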
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index e9bdf4e59..5037e657f 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index a05e66677..7e2fcccdc 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -14,7 +13,8 @@
namespace Kernel {
enum KMemoryRegionType : u32 {
- KMemoryRegionAttr_CarveoutProtected = 0x04000000,
+ KMemoryRegionAttr_CarveoutProtected = 0x02000000,
+ KMemoryRegionAttr_Uncached = 0x04000000,
KMemoryRegionAttr_DidKernelMap = 0x08000000,
KMemoryRegionAttr_ShouldKernelMap = 0x10000000,
KMemoryRegionAttr_UserReadOnly = 0x20000000,
@@ -239,6 +239,11 @@ static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
+// UNUSED: .DeriveSparse(2, 2, 0);
+constexpr auto KMemoryRegionType_VirtualDramUnknownDebug =
+ KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
+
constexpr auto KMemoryRegionType_VirtualDramKernelInitPt =
KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr auto KMemoryRegionType_VirtualDramPoolManagement =
@@ -330,6 +335,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
+ } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
+ return KMemoryRegionType_VirtualDramUnknownDebug;
} else {
return KMemoryRegionType_Dram;
}
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h
index c75d667c9..c97b3dc0b 100644
--- a/src/core/hle/kernel/k_page_bitmap.h
+++ b/src/core/hle/kernel/k_page_bitmap.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp
new file mode 100644
index 000000000..1a0bf4439
--- /dev/null
+++ b/src/core/hle/kernel/k_page_buffer.cpp
@@ -0,0 +1,18 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/k_page_buffer.h"
+#include "core/hle/kernel/memory_types.h"
+
+namespace Kernel {
+
+KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
+ ASSERT(Common::IsAligned(phys_addr, PageSize));
+ return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
new file mode 100644
index 000000000..7e50dc1d1
--- /dev/null
+++ b/src/core/hle/kernel/k_page_buffer.h
@@ -0,0 +1,27 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/slab_helpers.h"
+
+namespace Kernel {
+
+class KPageBuffer final : public KSlabAllocated<KPageBuffer> {
+public:
+ KPageBuffer() = default;
+
+ static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr);
+
+private:
+ [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{};
+};
+
+static_assert(sizeof(KPageBuffer) == PageSize);
+static_assert(alignof(KPageBuffer) == PageSize);
+
+} // namespace Kernel
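
Because `KPageBuffer` is exactly one page in size and page-aligned, a physical page and its slab object occupy the same bytes, so `FromPhysicalAddress` reduces to an aligned pointer reinterpretation. A reduced, self-contained sketch of that invariant, with a static byte array standing in for device memory (the names here are illustrative, not yuzu API):

#include <array>
#include <cassert>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

struct PageBuffer {
    alignas(PageSize) std::array<std::uint8_t, PageSize> m_buffer{};
};
static_assert(sizeof(PageBuffer) == PageSize);
static_assert(alignof(PageBuffer) == PageSize);

// Stand-in for the device-memory backing storage.
alignas(PageSize) std::uint8_t g_backing[4 * PageSize];

PageBuffer* FromOffset(std::size_t offset) {
    assert(offset % PageSize == 0); // mirrors the IsAligned assertion
    return reinterpret_cast<PageBuffer*>(g_backing + offset);
}

int main() {
    PageBuffer* page = FromOffset(2 * PageSize);
    page->m_buffer[0] = 0xFF;
    return 0;
}
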
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_group.h
index 0e2ae582a..968753992 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -13,7 +12,7 @@
namespace Kernel {
-class KPageLinkedList final {
+class KPageGroup final {
public:
class Node final {
public:
@@ -37,8 +36,8 @@ public:
};
public:
- KPageLinkedList() = default;
- KPageLinkedList(u64 address, u64 num_pages) {
+ KPageGroup() = default;
+ KPageGroup(u64 address, u64 num_pages) {
ASSERT(AddBlock(address, num_pages).IsSuccess());
}
@@ -58,7 +57,7 @@ public:
return num_pages;
}
- bool IsEqual(KPageLinkedList& other) const {
+ bool IsEqual(KPageGroup& other) const {
auto this_node = nodes.begin();
auto other_node = other.nodes.begin();
while (this_node != nodes.end() && other_node != other.nodes.end()) {
@@ -73,7 +72,7 @@ public:
return this_node == nodes.end() && other_node == other.nodes.end();
}
- ResultCode AddBlock(u64 address, u64 num_pages) {
+ Result AddBlock(u64 address, u64 num_pages) {
if (!num_pages) {
return ResultSuccess;
}
@@ -89,6 +88,10 @@ public:
return ResultSuccess;
}
+ bool Empty() const {
+ return nodes.empty();
+ }
+
private:
std::list<Node> nodes;
};
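
The hunk above shows only the head of `AddBlock`; its job is to append a block, merging it into the previous node when the new range begins exactly where the last one ends. A reduced sketch of that coalescing, assuming a 4 KiB page (types simplified from the real class):

#include <cstdint>
#include <list>

constexpr std::uint64_t PageSize = 0x1000;

struct Node {
    std::uint64_t addr;
    std::uint64_t num_pages;
};

struct PageGroup {
    std::list<Node> nodes;

    void AddBlock(std::uint64_t address, std::uint64_t num_pages) {
        if (num_pages == 0) {
            return;
        }
        // Extend the last node in place if the new range is contiguous with it.
        if (!nodes.empty()) {
            Node& last = nodes.back();
            if (last.addr + last.num_pages * PageSize == address) {
                last.num_pages += num_pages;
                return;
            }
        }
        nodes.push_back({address, num_pages});
    }
};

int main() {
    PageGroup pg;
    pg.AddBlock(0x10000, 4);
    pg.AddBlock(0x14000, 2); // contiguous with the first block: merged
    pg.AddBlock(0x20000, 1); // gap: starts a new node
    return pg.nodes.size() == 2 ? 0 : 1;
}
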
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 29d996d62..5ede60168 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -1,41 +1,56 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/core.h"
#include "core/hle/kernel/k_page_heap.h"
namespace Kernel {
-void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
- // Check our assumptions
- ASSERT(Common::IsAligned((address), PageSize));
+void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ // Check our assumptions.
+ ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(size, PageSize));
+ ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
+ const VAddr management_end = management_address + management_size;
- // Set our members
- heap_address = address;
- heap_size = size;
-
- // Setup bitmaps
- metadata.resize(metadata_size / sizeof(u64));
- u64* cur_bitmap_storage{metadata.data()};
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
- cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
- next_block_shift, cur_bitmap_storage);
+ // Set our members.
+ m_heap_address = address;
+ m_heap_size = size;
+ m_num_blocks = num_block_shifts;
+
+ // Setup bitmaps.
+ m_management_data.resize(management_size / sizeof(u64));
+ u64* cur_bitmap_storage{m_management_data.data()};
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+ cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift,
+ next_block_shift, cur_bitmap_storage);
}
+
+ // Ensure we didn't overextend our bounds.
+ ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+}
+
+size_t KPageHeap::GetNumFreePages() const {
+ size_t num_free = 0;
+
+ for (size_t i = 0; i < m_num_blocks; i++) {
+ num_free += m_blocks[i].GetNumFreePages();
+ }
+
+ return num_free;
}
-VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
- const std::size_t needed_size{blocks[index].GetSize()};
+PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+ const size_t needed_size = m_blocks[index].GetSize();
- for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
- if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
- if (const std::size_t allocated_size{blocks[i].GetSize()};
- allocated_size > needed_size) {
- Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+ for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
+ if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+ if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
+ this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
}
@@ -44,34 +59,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
return 0;
}
-void KPageHeap::FreeBlock(VAddr block, s32 index) {
+void KPageHeap::FreeBlock(PAddr block, s32 index) {
do {
- block = blocks[index++].PushBlock(block);
+ block = m_blocks[index++].PushBlock(block);
} while (block != 0);
}
-void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
- // Freeing no pages is a no-op
+void KPageHeap::Free(PAddr addr, size_t num_pages) {
+ // Freeing no pages is a no-op.
if (num_pages == 0) {
return;
}
- // Find the largest block size that we can free, and free as many as possible
- s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
- const VAddr start{addr};
- const VAddr end{(num_pages * PageSize) + addr};
- VAddr before_start{start};
- VAddr before_end{start};
- VAddr after_start{end};
- VAddr after_end{end};
+ // Find the largest block size that we can free, and free as many as possible.
+ s32 big_index = static_cast<s32>(m_num_blocks) - 1;
+ const PAddr start = addr;
+ const PAddr end = addr + num_pages * PageSize;
+ PAddr before_start = start;
+ PAddr before_end = start;
+ PAddr after_start = end;
+ PAddr after_end = end;
while (big_index >= 0) {
- const std::size_t block_size{blocks[big_index].GetSize()};
- const VAddr big_start{Common::AlignUp((start), block_size)};
- const VAddr big_end{Common::AlignDown((end), block_size)};
+ const size_t block_size = m_blocks[big_index].GetSize();
+ const PAddr big_start = Common::AlignUp(start, block_size);
+ const PAddr big_end = Common::AlignDown(end, block_size);
if (big_start < big_end) {
- // Free as many big blocks as we can
- for (auto block{big_start}; block < big_end; block += block_size) {
- FreeBlock(block, big_index);
+ // Free as many big blocks as we can.
+ for (auto block = big_start; block < big_end; block += block_size) {
+ this->FreeBlock(block, big_index);
}
before_end = big_start;
after_start = big_end;
@@ -81,31 +96,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
}
ASSERT(big_index >= 0);
- // Free space before the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space before the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (before_start + block_size <= before_end) {
before_end -= block_size;
- FreeBlock(before_end, i);
+ this->FreeBlock(before_end, i);
}
}
- // Free space after the big blocks
- for (s32 i{big_index - 1}; i >= 0; i--) {
- const std::size_t block_size{blocks[i].GetSize()};
+ // Free space after the big blocks.
+ for (s32 i = big_index - 1; i >= 0; i--) {
+ const size_t block_size = m_blocks[i].GetSize();
while (after_start + block_size <= after_end) {
- FreeBlock(after_start, i);
+ this->FreeBlock(after_start, i);
after_start += block_size;
}
}
}
-std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
- std::size_t overhead_size = 0;
- for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
- const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
- const std::size_t next_block_shift{
- (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
+size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts) {
+ size_t overhead_size = 0;
+ for (size_t i = 0; i < num_block_shifts; i++) {
+ const size_t cur_block_shift = block_shifts[i];
+ const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
region_size, cur_block_shift, next_block_shift);
}
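
For intuition on `CalculateManagementOverheadSize`: each block size needs one bitmap bit per block over the alignment-padded region, and the per-size costs are summed. A simplified compile-time model of that loop, using a flat one-bit-per-block bitmap (the real `KPageBitmap` is hierarchical, so the true figure is somewhat larger):

#include <cstddef>

constexpr std::size_t AlignUp(std::size_t v, std::size_t a) {
    return (v + a - 1) / a * a;
}

// Flat stand-in for KPageBitmap::CalculateManagementOverheadSize():
// one bit per block, rounded up to whole u64 words, in bytes.
constexpr std::size_t BitmapBytes(std::size_t num_bits) {
    return AlignUp(num_bits, 64) / 8;
}

constexpr std::size_t Overhead(std::size_t region_size, const std::size_t* shifts,
                               std::size_t count) {
    std::size_t total = 0;
    for (std::size_t i = 0; i < count; i++) {
        const std::size_t cur_size = std::size_t(1) << shifts[i];
        const std::size_t next_size =
            (i != count - 1) ? (std::size_t(1) << shifts[i + 1]) : cur_size;
        // Matches the kernel formula: (align * 2 + AlignUp(region, align)) / cur.
        total += BitmapBytes((next_size * 2 + AlignUp(region_size, next_size)) / cur_size);
    }
    return total;
}

constexpr std::size_t kShifts[] = {0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};
static_assert(Overhead(256 * 1024 * 1024, kShifts, 7) > 0);
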
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index a65aa28a0..0917a8bed 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -23,54 +22,73 @@ public:
KPageHeap() = default;
~KPageHeap() = default;
- constexpr VAddr GetAddress() const {
- return heap_address;
+ constexpr PAddr GetAddress() const {
+ return m_heap_address;
}
- constexpr std::size_t GetSize() const {
- return heap_size;
+ constexpr size_t GetSize() const {
+ return m_heap_size;
}
- constexpr VAddr GetEndAddress() const {
- return GetAddress() + GetSize();
+ constexpr PAddr GetEndAddress() const {
+ return this->GetAddress() + this->GetSize();
}
- constexpr std::size_t GetPageOffset(VAddr block) const {
- return (block - GetAddress()) / PageSize;
+ constexpr size_t GetPageOffset(PAddr block) const {
+ return (block - this->GetAddress()) / PageSize;
+ }
+ constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+ return (this->GetEndAddress() - block) / PageSize;
+ }
+
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size) {
+ return this->Initialize(heap_address, heap_size, management_address, management_size,
+ MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
+ }
+
+ size_t GetFreeSize() const {
+ return this->GetNumFreePages() * PageSize;
}
- void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
- VAddr AllocateBlock(s32 index, bool random);
- void Free(VAddr addr, std::size_t num_pages);
+ void SetInitialUsedSize(size_t reserved_size) {
+ // Check that the reserved size is valid.
+ const size_t free_size = this->GetNumFreePages() * PageSize;
+ ASSERT(m_heap_size >= free_size + reserved_size);
- void UpdateUsedSize() {
- used_size = heap_size - (GetNumFreePages() * PageSize);
+ // Set the initial used size.
+ m_initial_used_size = m_heap_size - free_size - reserved_size;
}
- static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+ PAddr AllocateBlock(s32 index, bool random);
+ void Free(PAddr addr, size_t num_pages);
+
+ static size_t CalculateManagementOverheadSize(size_t region_size) {
+ return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
+ NumMemoryBlockPageShifts);
+ }
- static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
- const auto target_pages{std::max(num_pages, align_pages)};
- for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
- if (target_pages <=
- (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
+ const size_t target_pages = std::max(num_pages, align_pages);
+ for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+ if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
return -1;
}
- static constexpr s32 GetBlockIndex(std::size_t num_pages) {
- for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
- if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+ static constexpr s32 GetBlockIndex(size_t num_pages) {
+ for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
+ if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
return -1;
}
- static constexpr std::size_t GetBlockSize(std::size_t index) {
- return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
+ static constexpr size_t GetBlockSize(size_t index) {
+ return size_t(1) << MemoryBlockPageShifts[index];
}
- static constexpr std::size_t GetBlockNumPages(std::size_t index) {
+ static constexpr size_t GetBlockNumPages(size_t index) {
return GetBlockSize(index) / PageSize;
}
@@ -83,114 +101,116 @@ private:
Block() = default;
~Block() = default;
- constexpr std::size_t GetShift() const {
- return block_shift;
+ constexpr size_t GetShift() const {
+ return m_block_shift;
}
- constexpr std::size_t GetNextShift() const {
- return next_block_shift;
+ constexpr size_t GetNextShift() const {
+ return m_next_block_shift;
}
- constexpr std::size_t GetSize() const {
- return static_cast<std::size_t>(1) << GetShift();
+ constexpr size_t GetSize() const {
+ return u64(1) << this->GetShift();
}
- constexpr std::size_t GetNumPages() const {
- return GetSize() / PageSize;
+ constexpr size_t GetNumPages() const {
+ return this->GetSize() / PageSize;
}
- constexpr std::size_t GetNumFreeBlocks() const {
- return bitmap.GetNumBits();
+ constexpr size_t GetNumFreeBlocks() const {
+ return m_bitmap.GetNumBits();
}
- constexpr std::size_t GetNumFreePages() const {
- return GetNumFreeBlocks() * GetNumPages();
+ constexpr size_t GetNumFreePages() const {
+ return this->GetNumFreeBlocks() * this->GetNumPages();
}
- u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
- u64* bit_storage) {
- // Set shifts
- block_shift = bs;
- next_block_shift = nbs;
-
- // Align up the address
- VAddr end{addr + size};
- const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
- : (1ULL << block_shift)};
- addr = Common::AlignDown((addr), align);
- end = Common::AlignUp((end), align);
-
- heap_address = addr;
- end_offset = (end - addr) / (1ULL << block_shift);
- return bitmap.Initialize(bit_storage, end_offset);
+ u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+ // Set shifts.
+ m_block_shift = bs;
+ m_next_block_shift = nbs;
+
+ // Align up the address.
+ PAddr end = addr + size;
+ const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
+ : (u64(1) << m_block_shift);
+ addr = Common::AlignDown(addr, align);
+ end = Common::AlignUp(end, align);
+
+ m_heap_address = addr;
+ m_end_offset = (end - addr) / (u64(1) << m_block_shift);
+ return m_bitmap.Initialize(bit_storage, m_end_offset);
}
- VAddr PushBlock(VAddr address) {
- // Set the bit for the free block
- std::size_t offset{(address - heap_address) >> GetShift()};
- bitmap.SetBit(offset);
+ PAddr PushBlock(PAddr address) {
+ // Set the bit for the free block.
+ size_t offset = (address - m_heap_address) >> this->GetShift();
+ m_bitmap.SetBit(offset);
- // If we have a next shift, try to clear the blocks below and return the address
- if (GetNextShift()) {
- const auto diff{1ULL << (GetNextShift() - GetShift())};
+ // If we have a next shift, try to clear the blocks below this one and return the new
+ // address.
+ if (this->GetNextShift()) {
+ const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
offset = Common::AlignDown(offset, diff);
- if (bitmap.ClearRange(offset, diff)) {
- return heap_address + (offset << GetShift());
+ if (m_bitmap.ClearRange(offset, diff)) {
+ return m_heap_address + (offset << this->GetShift());
}
}
- // We couldn't coalesce, or we're already as big as possible
- return 0;
+ // We couldn't coalesce, or we're already as big as possible.
+ return {};
}
- VAddr PopBlock(bool random) {
- // Find a free block
- const s64 soffset{bitmap.FindFreeBlock(random)};
+ PAddr PopBlock(bool random) {
+ // Find a free block.
+ s64 soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
- return 0;
+ return {};
}
- const auto offset{static_cast<std::size_t>(soffset)};
+ const size_t offset = static_cast<size_t>(soffset);
- // Update our tracking and return it
- bitmap.ClearBit(offset);
- return heap_address + (offset << GetShift());
+ // Update our tracking and return it.
+ m_bitmap.ClearBit(offset);
+ return m_heap_address + (offset << this->GetShift());
}
- static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
- std::size_t cur_block_shift,
- std::size_t next_block_shift) {
- const auto cur_block_size{(1ULL << cur_block_shift)};
- const auto next_block_size{(1ULL << next_block_shift)};
- const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
+ public:
+ static constexpr size_t CalculateManagementOverheadSize(size_t region_size,
+ size_t cur_block_shift,
+ size_t next_block_shift) {
+ const size_t cur_block_size = (u64(1) << cur_block_shift);
+ const size_t next_block_size = (u64(1) << next_block_shift);
+ const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
return KPageBitmap::CalculateManagementOverheadSize(
(align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
}
private:
- KPageBitmap bitmap;
- VAddr heap_address{};
- uintptr_t end_offset{};
- std::size_t block_shift{};
- std::size_t next_block_shift{};
+ KPageBitmap m_bitmap;
+ PAddr m_heap_address{};
+ uintptr_t m_end_offset{};
+ size_t m_block_shift{};
+ size_t m_next_block_shift{};
};
- constexpr std::size_t GetNumFreePages() const {
- std::size_t num_free{};
-
- for (const auto& block : blocks) {
- num_free += block.GetNumFreePages();
- }
-
- return num_free;
- }
+private:
+ void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+ size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+ size_t GetNumFreePages() const;
- void FreeBlock(VAddr block, s32 index);
+ void FreeBlock(PAddr block, s32 index);
- static constexpr std::size_t NumMemoryBlockPageShifts{7};
- static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+ static constexpr size_t NumMemoryBlockPageShifts{7};
+ static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
};
- VAddr heap_address{};
- std::size_t heap_size{};
- std::size_t used_size{};
- std::array<Block, NumMemoryBlockPageShifts> blocks{};
- std::vector<u64> metadata;
+private:
+ static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+ size_t num_block_shifts);
+
+private:
+ PAddr m_heap_address{};
+ size_t m_heap_size{};
+ size_t m_initial_used_size{};
+ size_t m_num_blocks{};
+ std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+ std::vector<u64> m_management_data;
};
} // namespace Kernel
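
The seven `MemoryBlockPageShifts` correspond to block sizes of 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB and 1 GiB. `GetAlignedBlockIndex` picks the smallest block that covers a request (for allocation), while `GetBlockIndex` picks the largest block not exceeding it (for freeing). A standalone worked example of both lookups, transcribed from the header above:

#include <algorithm>
#include <array>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;
constexpr std::array<std::size_t, 7> Shifts{0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};

// Smallest block able to hold max(num_pages, align_pages) pages.
constexpr int AlignedIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target = std::max(num_pages, align_pages);
    for (std::size_t i = 0; i < Shifts.size(); i++) {
        if (target <= (std::size_t(1) << Shifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

// Largest block that still fits inside the request.
constexpr int Index(std::size_t num_pages) {
    for (int i = static_cast<int>(Shifts.size()) - 1; i >= 0; i--) {
        if (num_pages >= (std::size_t(1) << Shifts[i]) / PageSize) {
            return i;
        }
    }
    return -1;
}

static_assert(AlignedIndex(20, 1) == 2); // 20 pages only fit a 2 MiB block (512 pages)
static_assert(Index(20) == 1);           // a 64 KiB block (16 pages) fits within 20 pages
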
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..d975de844 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/assert.h"
@@ -10,7 +9,7 @@
#include "core/hle/kernel/k_address_space_info.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_block_manager.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -36,29 +35,11 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
case FileSys::ProgramAddressSpaceType::Is39Bit:
return 39;
default:
- UNREACHABLE();
+ ASSERT(false);
return {};
}
}
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
- if (info.GetAddress() < addr) {
- return addr;
- }
- return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
- std::size_t size{info.GetSize()};
- if (info.GetAddress() < start) {
- size -= start - info.GetAddress();
- }
- if (info.GetEndAddress() > end) {
- size -= info.GetEndAddress() - end;
- }
- return size;
-}
-
} // namespace
KPageTable::KPageTable(Core::System& system_)
@@ -66,9 +47,9 @@ KPageTable::KPageTable(Core::System& system_)
KPageTable::~KPageTable() = default;
-ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
- bool enable_aslr, VAddr code_addr,
- std::size_t code_size, KMemoryManager::Pool pool) {
+Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size,
+ KMemoryManager::Pool pool) {
const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) {
return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
@@ -84,7 +65,6 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)};
std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)};
- ASSERT(start <= code_addr);
ASSERT(code_addr < code_addr + code_size);
ASSERT(code_addr + code_size - 1 <= end - 1);
@@ -147,7 +127,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
const std::size_t needed_size{
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
if (alloc_size < needed_size) {
- UNREACHABLE();
+ ASSERT(false);
return ResultOutOfMemory;
}
@@ -277,8 +257,8 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
return InitializeMemoryLayout(start, end);
}
-ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
- KMemoryPermission perm) {
+Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
+ KMemoryPermission perm) {
const u64 size{num_pages * PageSize};
// Validate the mapping request.
@@ -291,89 +271,367 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
+ KPageGroup pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ &pg, num_pages,
+ KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option)));
- KPageLinkedList page_linked_list;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
- allocation_option));
- R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
+ R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup));
block_manager->Update(addr, num_pages, state, perm);
return ResultSuccess;
}
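
Throughout these functions, `Result`-returning calls bail out early through the `R_TRY` / `R_UNLESS` / `R_SUCCEED_IF` macros. A minimal sketch of what such macros boil down to, assuming only a `Result` wrapper with `IsError()` (yuzu's real definitions in core/hle/result.h are more elaborate, and the error code below is a placeholder):

#include <cstddef>
#include <cstdint>

struct Result {
    std::uint32_t raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultOutOfMemory{1}; // placeholder value

#define R_TRY(expr)                                     \
    do {                                                \
        if (const Result rc_ = (expr); rc_.IsError()) { \
            return rc_;                                 \
        }                                               \
    } while (false)

#define R_UNLESS(cond, rc) \
    do {                   \
        if (!(cond)) {     \
            return rc;     \
        }                  \
    } while (false)

#define R_SUCCEED_IF(cond) R_UNLESS(!(cond), ResultSuccess)

Result Allocate(std::size_t) { return ResultSuccess; } // stub

Result MapSomething(std::size_t bytes, std::size_t budget) {
    R_UNLESS(bytes <= budget, ResultOutOfMemory); // guard clause
    R_SUCCEED_IF(bytes == 0);                     // trivially done
    R_TRY(Allocate(bytes));                       // propagate failure
    return ResultSuccess;
}
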
-ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
KScopedLightLock lk(general_lock);
- const std::size_t num_pages{size / PageSize};
+ // Verify that the source memory is normal heap.
+ KMemoryState src_state{};
+ KMemoryPermission src_perm{};
+ std::size_t num_src_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks,
+ src_address, size, KMemoryState::All, KMemoryState::Normal,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::All, KMemoryAttribute::None));
- KMemoryState state{};
- KMemoryPermission perm{};
- CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, nullptr, src_addr, size,
- KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask,
- KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
+ // Verify that the destination memory is unmapped.
+ std::size_t num_dst_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All,
+ KMemoryState::Free, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryAttribute::None));
- if (IsRegionMapped(dst_addr, size)) {
- return ResultInvalidCurrentMemory;
+ // Map the code memory.
+ {
+ // Determine the number of pages being operated on.
+ const std::size_t num_pages = size / PageSize;
+
+ // Create page groups for the memory being mapped.
+ KPageGroup pg;
+ AddRegionToPages(src_address, num_pages, pg);
+
+ // Reprotect the source as kernel-read/not mapped.
+ const auto new_perm = static_cast<KMemoryPermission>(KMemoryPermission::KernelRead |
+ KMemoryPermission::NotMapped);
+ R_TRY(Operate(src_address, num_pages, new_perm, OperationType::ChangePermissions));
+
+ // Ensure that we unprotect the source pages on failure.
+ auto unprot_guard = SCOPE_GUARD({
+ ASSERT(this->Operate(src_address, num_pages, src_perm, OperationType::ChangePermissions)
+ .IsSuccess());
+ });
+
+ // Map the alias pages.
+ R_TRY(MapPages(dst_address, pg, new_perm));
+
+ // We successfully mapped the alias pages, so we don't need to unprotect the src pages on
+ // failure.
+ unprot_guard.Cancel();
+
+ // Apply the memory block updates.
+ block_manager->Update(src_address, num_pages, src_state, new_perm,
+ KMemoryAttribute::Locked);
+ block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm,
+ KMemoryAttribute::None);
}
- KPageLinkedList page_linked_list;
- AddRegionToPages(src_addr, num_pages, page_linked_list);
+ return ResultSuccess;
+}
+
+Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy) {
+ // Validate the mapping request.
+ R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode),
+ ResultInvalidMemoryRegion);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Verify that the source memory is locked normal heap.
+ std::size_t num_src_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size,
+ KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked));
+
+ // Verify that the destination memory is aliasable code.
+ std::size_t num_dst_allocator_blocks{};
+ R_TRY(this->CheckMemoryStateContiguous(
+ std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias,
+ KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
+ // Determine whether any pages being unmapped are code.
+ bool any_code_pages = false;
{
- auto block_guard = detail::ScopeExit(
- [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });
+ KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address);
+ while (true) {
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if the memory has code flag.
+ if ((info.GetState() & KMemoryState::FlagCode) != KMemoryState::None) {
+ any_code_pages = true;
+ break;
+ }
- CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None,
- OperationType::ChangePermissions));
- CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::None));
+ // Check if we're done.
+ if (dst_address + size - 1 <= info.GetLastAddress()) {
+ break;
+ }
- block_guard.Cancel();
+ // Advance.
+ ++it;
+ }
}
- block_manager->Update(src_addr, num_pages, state, KMemoryPermission::None,
- KMemoryAttribute::Locked);
- block_manager->Update(dst_addr, num_pages, KMemoryState::AliasCode);
+ // Ensure that we maintain the instruction cache.
+ bool reprotected_pages = false;
+ SCOPE_EXIT({
+ if (reprotected_pages && any_code_pages) {
+ if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) {
+ system.InvalidateCpuInstructionCacheRange(dst_address, size);
+ } else {
+ system.InvalidateCpuInstructionCaches();
+ }
+ }
+ });
+
+ // Unmap.
+ {
+ // Determine the number of pages being operated on.
+ const std::size_t num_pages = size / PageSize;
+
+ // Unmap the aliased copy of the pages.
+ R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+ // Try to set the permissions for the source pages back to what they should be.
+ R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite,
+ OperationType::ChangePermissions));
+
+ // Apply the memory block updates.
+ block_manager->Update(dst_address, num_pages, KMemoryState::None);
+ block_manager->Update(src_address, num_pages, KMemoryState::Normal,
+ KMemoryPermission::UserReadWrite);
+
+ // Note that we reprotected pages.
+ reprotected_pages = true;
+ }
return ResultSuccess;
}
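
`MapCodeMemory` reprotects the source pages before mapping the alias, and arms a scope guard so that a failure on any later step automatically restores the original permissions; on success the guard is cancelled. A minimal cancelable guard in the spirit of `SCOPE_GUARD` / `detail::ScopeExit` (simplified from common/scope_exit.h; `Protect` is a stand-in):

#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& f) : func(std::forward<F>(f)) {}
    ~ScopeExit() {
        if (active) {
            func(); // runs on scope exit unless cancelled
        }
    }
    void Cancel() { active = false; }
    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func;
    bool active = true;
};

bool Protect(bool on) { return on; } // imagine: flips page permissions

int main() {
    Protect(true); // reprotect the source pages
    ScopeExit guard{[] { Protect(false); }};

    const bool map_ok = true; // pretend the alias mapping succeeded
    if (map_ok) {
        guard.Cancel(); // commit: keep the new protection
    }
    return 0;
}
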
-ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
+VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
+ std::size_t num_pages, std::size_t alignment, std::size_t offset,
+ std::size_t guard_pages) {
+ VAddr address = 0;
+
+ if (num_pages <= region_num_pages) {
+ if (this->IsAslrEnabled()) {
+ // Try to directly find a free area up to 8 times.
+ for (std::size_t i = 0; i < 8; i++) {
+ const std::size_t random_offset =
+ KSystemControl::GenerateRandomRange(
+ 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
+ alignment;
+ const VAddr candidate =
+ Common::AlignDown((region_start + random_offset), alignment) + offset;
+
+ KMemoryInfo info = this->QueryInfoImpl(candidate);
+
+ if (info.state != KMemoryState::Free) {
+ continue;
+ }
+ if (region_start > candidate) {
+ continue;
+ }
+ if (info.GetAddress() + guard_pages * PageSize > candidate) {
+ continue;
+ }
+
+ const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
+ if (candidate_end > info.GetLastAddress()) {
+ continue;
+ }
+ if (candidate_end > region_start + region_num_pages * PageSize - 1) {
+ continue;
+ }
+
+ address = candidate;
+ break;
+ }
+ // Fall back to finding the first free area with a random offset.
+ if (address == 0) {
+ // NOTE: Nintendo does not account for guard pages here.
+ // This may theoretically cause an offset to be chosen that cannot be mapped. We
+ // will account for guard pages.
+ const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
+ 0, region_num_pages - num_pages - guard_pages);
+ address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
+ region_num_pages - offset_pages, num_pages,
+ alignment, offset, guard_pages);
+ }
+ }
- if (!size) {
- return ResultSuccess;
+ // Find the first free area.
+ if (address == 0) {
+ address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
+ alignment, offset, guard_pages);
+ }
}
- const std::size_t num_pages{size / PageSize};
+ return address;
+}
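
The randomized path in `FindFreeArea` draws a random multiple of `alignment` inside the usable span, then aligns down and applies the offset; the result is only a candidate, still subject to the state and containment checks above. A standalone sketch of the candidate computation, with std::mt19937_64 standing in for KSystemControl::GenerateRandomRange:

#include <cstddef>
#include <cstdint>
#include <random>

constexpr std::size_t PageSize = 0x1000;

std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value / align * align;
}

std::uint64_t RandomCandidate(std::uint64_t region_start, std::size_t region_num_pages,
                              std::size_t num_pages, std::size_t alignment, std::size_t offset,
                              std::size_t guard_pages, std::mt19937_64& rng) {
    // Number of alignment-sized slots the request (plus guards) can start at.
    const std::uint64_t slots =
        (region_num_pages - num_pages - guard_pages) * PageSize / alignment;
    std::uniform_int_distribution<std::uint64_t> dist(0, slots);
    const std::uint64_t random_offset = dist(rng) * alignment;
    return AlignDown(region_start + random_offset, alignment) + offset;
}

int main() {
    std::mt19937_64 rng{0};
    const auto candidate = RandomCandidate(0x8000000, 0x10000, 0x100, PageSize, 0, 1, rng);
    return candidate % PageSize == 0 ? 0 : 1;
}
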
- CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, nullptr, src_addr, size,
- KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
+Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
- KMemoryState state{};
- CASCADE_CODE(CheckMemoryState(
- &state, nullptr, nullptr, nullptr, dst_addr, PageSize, KMemoryState::FlagCanCodeAlias,
- KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
- CASCADE_CODE(CheckMemoryState(dst_addr, size, KMemoryState::All, state, KMemoryPermission::None,
- KMemoryPermission::None, KMemoryAttribute::Mask,
- KMemoryAttribute::None));
- CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
+ const size_t size = num_pages * PageSize;
- block_manager->Update(dst_addr, num_pages, KMemoryState::Free);
- block_manager->Update(src_addr, num_pages, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite);
+ // We're making a new group, not adding to an existing one.
+ R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry next_entry;
+ R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
- system.InvalidateCpuInstructionCacheRange(dst_addr, size);
+ // Prepare tracking variables.
+ PAddr cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, adding to group as we go.
+ const auto& memory_layout = system.Kernel().MemoryLayout();
+ while (tot_size < size) {
+ R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+ ResultInvalidCurrentMemory);
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we add the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ // Add the last block.
+ const size_t cur_pages = cur_size / PageSize;
+ R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+ R_TRY(pg.AddBlock(cur_addr, cur_pages));
return ResultSuccess;
}
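
`MakePageGroup` walks the translation entries for a virtual range and emits one block per physically contiguous run, extending the current run while `next_entry.phys_addr == cur_addr + cur_size`. The same merging logic, reduced to a plain loop over (address, size) pairs:

#include <cstdint>
#include <vector>

struct Entry {
    std::uint64_t phys_addr;
    std::uint64_t block_size;
};

// Merge physically contiguous entries into maximal runs.
std::vector<Entry> MergeRuns(const std::vector<Entry>& entries) {
    std::vector<Entry> out;
    for (const Entry& e : entries) {
        if (!out.empty() && out.back().phys_addr + out.back().block_size == e.phys_addr) {
            out.back().block_size += e.block_size; // contiguous: extend the run
        } else {
            out.push_back(e); // discontiguous: start a new block
        }
    }
    return out;
}

int main() {
    const std::vector<Entry> walk{{0x1000, 0x1000}, {0x2000, 0x1000}, {0x8000, 0x1000}};
    return MergeRuns(walk).size() == 2 ? 0 : 1;
}
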
-ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
- KPageTable& src_page_table, VAddr src_addr) {
+bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ const size_t size = num_pages * PageSize;
+ const auto& pg = pg_ll.Nodes();
+ const auto& memory_layout = system.Kernel().MemoryLayout();
+
+ // Empty groups are necessarily invalid.
+ if (pg.empty()) {
+ return false;
+ }
+
+ // We're going to validate that the group we'd expect is the group we see.
+ auto cur_it = pg.begin();
+ PAddr cur_block_address = cur_it->GetAddress();
+ size_t cur_block_pages = cur_it->GetNumPages();
+
+ auto UpdateCurrentIterator = [&]() {
+ if (cur_block_pages == 0) {
+ if ((++cur_it) == pg.end()) {
+ return false;
+ }
+
+ cur_block_address = cur_it->GetAddress();
+ cur_block_pages = cur_it->GetNumPages();
+ }
+ return true;
+ };
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry next_entry;
+ if (!page_table_impl.BeginTraversal(next_entry, context, addr)) {
+ return false;
+ }
+
+ // Prepare tracking variables.
+ PAddr cur_addr = next_entry.phys_addr;
+ size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+ size_t tot_size = cur_size;
+
+ // Iterate, comparing expected to actual.
+ while (tot_size < size) {
+ if (!page_table_impl.ContinueTraversal(next_entry, context)) {
+ return false;
+ }
+
+ if (next_entry.phys_addr != (cur_addr + cur_size)) {
+ const size_t cur_pages = cur_size / PageSize;
+
+ if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
+ return false;
+ }
+
+ cur_block_address += cur_size;
+ cur_block_pages -= cur_pages;
+ cur_addr = next_entry.phys_addr;
+ cur_size = next_entry.block_size;
+ } else {
+ cur_size += next_entry.block_size;
+ }
+
+ tot_size += next_entry.block_size;
+ }
+
+ // Ensure we compare the right amount for the last block.
+ if (tot_size > size) {
+ cur_size -= (tot_size - size);
+ }
+
+ if (!IsHeapPhysicalAddress(memory_layout, cur_addr)) {
+ return false;
+ }
+
+ if (!UpdateCurrentIterator()) {
+ return false;
+ }
+
+ return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
+}
+
+Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr) {
KScopedLightLock lk(general_lock);
const std::size_t num_pages{size / PageSize};
@@ -397,156 +655,486 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
KMemoryAttribute::None);
+ system.InvalidateCpuInstructionCaches();
+
return ResultSuccess;
}
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- // Lock the table.
- KScopedLightLock lk(general_lock);
-
- std::size_t mapped_size{};
- const VAddr end_addr{addr + size};
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- mapped_size += GetSizeInRange(info, addr, end_addr);
- }
- });
+ // Define iteration variables.
+ VAddr cur_address;
+ std::size_t mapped_size;
- if (mapped_size == size) {
- return ResultSuccess;
- }
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
- const std::size_t remaining_size{size - mapped_size};
- const std::size_t remaining_pages{remaining_size / PageSize};
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
- remaining_size);
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
- return ResultLimitReached;
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(),
+ LimitableResource::PhysicalMemory, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageGroup pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ &pg, (size - mapped_size) / PageSize,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ if (is_free) {
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto unmap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const size_t cur_pages =
+ std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+ OperationType::Unmap)
+ .IsSuccess());
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+ });
+
+ // Iterate over the memory.
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+ OperationType::Map, pg_phys_addr));
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ block_manager->Update(address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None);
+
+ // Cancel our guard.
+ unmap_guard.Cancel();
+
+ return ResultSuccess;
+ }
+ }
}
+}
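
`MapPhysicalMemory` is structured as an optimistic retry loop: measure the already-mapped size under the table lock, drop the lock for the expensive allocation, then re-measure under the lock and retry from the top if another thread changed the mapping in between. A compact sketch of that pattern with stubbed-out measurement, allocation, and mapping:

#include <cstddef>
#include <mutex>

std::mutex table_lock;

// Stubs standing in for block-manager iteration and the page allocator.
std::size_t MeasureMappedSize() { return 0; }
bool Allocate(std::size_t) { return true; }
bool Map(std::size_t) { return true; }

bool MapAll(std::size_t size) {
    while (true) {
        std::size_t mapped;
        {
            std::scoped_lock lk{table_lock};
            mapped = MeasureMappedSize();
            if (mapped == size) {
                return true; // already fully mapped
            }
        }

        // Allocate with the lock dropped; this is the slow step.
        if (!Allocate(size - mapped)) {
            return false;
        }

        std::scoped_lock lk{table_lock};
        if (MeasureMappedSize() != mapped) {
            continue; // someone mapped or unmapped concurrently: retry
        }
        return Map(size - mapped);
    }
}

int main() {
    return MapAll(0x10000) ? 0 : 1;
}
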
- KPageLinkedList page_linked_list;
+Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
- memory_pool, allocation_option));
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- // Map the memory.
- auto node{page_linked_list.Nodes().begin()};
- PAddr map_addr{node->GetAddress()};
- std::size_t src_num_pages{node->GetNumPages()};
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- return;
- }
+ // Define iteration variables.
+ VAddr cur_address = 0;
+ std::size_t mapped_size = 0;
+ std::size_t num_allocator_blocks = 0;
- std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
- VAddr dst_addr{GetAddressInRange(info, addr)};
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
- while (dst_num_pages) {
- if (!src_num_pages) {
- node = std::next(node);
- map_addr = node->GetAddress();
- src_num_pages = node->GetNumPages();
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (is_normal) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
}
- const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
- Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
- map_addr);
+ // Track the memory if it's mapped.
+ if (is_normal) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
- dst_addr += num_pages * PageSize;
- map_addr += num_pages * PageSize;
- src_num_pages -= num_pages;
- dst_num_pages -= num_pages;
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
}
- });
- mapped_physical_memory_size += remaining_size;
-
- const std::size_t num_pages{size / PageSize};
- block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
-
- return ResultSuccess;
-}
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ // Make a page group for the unmap region.
+ KPageGroup pg;
+ {
+ auto& impl = this->PageTableImpl();
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ Common::PageTable::TraversalEntry next_entry;
+ bool next_valid = false;
+ size_t tot_size = 0;
+
+ cur_address = address;
+ next_valid = impl.BeginTraversal(next_entry, context, cur_address);
+ next_entry.block_size =
+ (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+ // Iterate, building the group.
+ while (true) {
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
- // Lock the table.
- KScopedLightLock lk(general_lock);
+ if (cur_entry.block_size + tot_size >= size) {
+ break;
+ }
- const VAddr end_addr{addr + size};
- ResultCode result{ResultSuccess};
- std::size_t mapped_size{};
+ next_valid = impl.ContinueTraversal(next_entry, context);
+ }
- // Verify that the region can be unmapped
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- if (info.attribute != KMemoryAttribute::None) {
- result = ResultInvalidCurrentMemory;
- return;
+ // Add the last block.
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+ }
+ }
+ ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto remap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_map_address = cur_address - 1;
+ cur_address = address;
+
+ // Iterate over the memory we unmapped.
+ auto it = block_manager->FindIterator(cur_address);
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ while (true) {
+ // Get the memory info for the pages we unmapped.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory is normal, we unmapped it and need to re-map it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_map_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+ OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_map_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
}
- mapped_size += GetSizeInRange(info, addr, end_addr);
- } else if (info.state != KMemoryState::Free) {
- result = ResultInvalidCurrentMemory;
}
});
- if (result.IsError()) {
- return result;
- }
-
- if (!mapped_size) {
- return ResultSuccess;
- }
+ // Iterate over the memory, unmapping as we go.
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
- // Unmap each region within the range
- KPageLinkedList page_linked_list;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
- const std::size_t block_num_pages{block_size / PageSize};
- const VAddr block_addr{GetAddressInRange(info, addr)};
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
- AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
- if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
- OperationType::Unmap);
- result.IsError()) {
- return;
- }
+ // Unmap.
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
}
- });
- if (result.IsError()) {
- return result;
- }
- const std::size_t num_pages{size / PageSize};
- system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
- allocation_option);
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
- block_manager->Update(addr, num_pages, KMemoryState::Free);
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ // Release the memory resource.
+ mapped_physical_memory_size -= mapped_size;
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
- mapped_physical_memory_size -= mapped_size;
+
+ // Update memory blocks.
+ block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None);
+
+ // TODO(bunnei): This is a workaround until the next set of changes, where we add reference
+ // counting for mapped pages. Until then, we must manually close the reference to the page
+ // group.
+ system.Kernel().MemoryManager().Close(pg);
+
+ // We succeeded.
+ remap_guard.Cancel();
return ResultSuccess;
}
-ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -559,7 +1147,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultInvalidCurrentMemory;
}
- KPageLinkedList page_linked_list;
+ KPageGroup page_linked_list;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, page_linked_list);
@@ -585,7 +1173,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
return ResultSuccess;
}
-ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState src_state{};
@@ -600,8 +1188,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
- KPageLinkedList src_pages;
- KPageLinkedList dst_pages;
+ KPageGroup src_pages;
+ KPageGroup dst_pages;
const std::size_t num_pages{size / PageSize};
AddRegionToPages(src_addr, num_pages, src_pages);
@@ -627,8 +1215,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
+ KMemoryPermission perm) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
@@ -651,8 +1239,8 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}
-ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
- KMemoryState state, KMemoryPermission perm) {
+Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm) {
// Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -675,15 +1263,54 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
+ PAddr phys_addr, bool is_pa_valid, VAddr region_start,
+ std::size_t region_num_pages, KMemoryState state,
+ KMemoryPermission perm) {
+ ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+ // Ensure this is a valid map request.
+ R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
+ ResultInvalidCurrentMemory);
+ R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Find a random address to map at.
+ VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
+ this->GetNumGuardPages());
+ R_UNLESS(addr != 0, ResultOutOfMemory);
+ ASSERT(Common::IsAligned(addr, alignment));
+ ASSERT(this->CanContain(addr, num_pages * PageSize, state));
+ ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::None, KMemoryAttribute::None)
+ .IsSuccess());
+
+ // Perform mapping operation.
+ if (is_pa_valid) {
+ R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ // Update the blocks.
+ block_manager->Update(addr, num_pages, state, perm);
+
+ // We successfully mapped the pages.
+ *out_addr = addr;
+ return ResultSuccess;
+}
+
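The overload above picks its own virtual address via FindFreeArea(); callers reach it through the public wrapper added in the header further down. A hedged usage sketch (the Io state, page count, and helper name are illustrative, not from the commit):

// Hypothetical caller mapping 4 pages of a known physical range wherever
// the target state's region has room. Only the MapPages signature is real.
Result MapDeviceWindow(KPageTable& pt, PAddr dev_pa, VAddr* out) {
    return pt.MapPages(out, 4, PageSize, dev_pa, KMemoryState::Io,
                       KMemoryPermission::UserReadWrite);
}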
+Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
ASSERT(this->IsLockedByCurrentThread());
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
- const std::size_t num_pages{(addr - cur_addr) / PageSize};
- if (const auto result{
- Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
+ if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
+ OperationType::Unmap)};
result.IsError()) {
return result;
}
@@ -694,8 +1321,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
return ResultSuccess;
}
-ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
- KMemoryState state) {
+Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) {
// Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
@@ -718,8 +1344,57 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
return ResultSuccess;
}
-ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
+ // Check that the unmap is in range.
+ const std::size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check the memory state.
+ std::size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
+ KMemoryState::All, state, KMemoryPermission::None,
+ KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::None));
+
+ // Perform the unmap.
+ R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
+
+ // Update the blocks.
+ block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
+
+ return ResultSuccess;
+}
+
+Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+ // Ensure that the page group isn't null.
+ ASSERT(out != nullptr);
+
+ // Make sure that the region we're mapping is valid for the table.
+ const size_t size = num_pages * PageSize;
+ R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check if state allows us to create the group.
+ R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Create a new page group for the region.
+ R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+ return ResultSuccess;
+}
+
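MakeAndOpenPageGroup packages the common pin-a-user-region pattern: validate the range, lock the table, check state, then build the group. A sketch of a typical call, with mask values mirroring the code-memory case elsewhere in this change (page_table, address, and size are stand-ins):

KPageGroup pg;
R_TRY(page_table.MakeAndOpenPageGroup(
    std::addressof(pg), address, size / PageSize,
    KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
    KMemoryPermission::All, KMemoryPermission::UserReadWrite,
    KMemoryAttribute::All, KMemoryAttribute::None));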
+Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -753,7 +1428,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
new_state = KMemoryState::AliasCodeData;
break;
default:
- UNREACHABLE();
+ ASSERT(false);
}
}
@@ -791,7 +1466,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
return QueryInfoImpl(addr);
}
-ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
+Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -809,7 +1484,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
return ResultSuccess;
}
-ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
+Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -824,8 +1499,8 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm) {
+Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+ Svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
// Lock the table.
@@ -852,7 +1527,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
+Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) ==
KMemoryAttribute::SetMask);
@@ -887,7 +1562,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
return ResultSuccess;
}
-ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+Result KPageTable::SetMaxHeapSize(std::size_t size) {
// Lock the table.
KScopedLightLock lk(general_lock);
@@ -899,7 +1574,7 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Lock the physical memory mutex.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
@@ -966,9 +1641,16 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Allocate pages for the heap extension.
- KPageLinkedList page_linked_list;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
- memory_pool, allocation_option));
+ KPageGroup pg;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
+ &pg, allocation_size / PageSize,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option)));
+
+ // Clear all the newly allocated pages.
+ for (const auto& it : pg.Nodes()) {
+ std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value,
+ it.GetSize());
+ }
// Map the pages.
{
@@ -987,7 +1669,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Map the pages.
const auto num_pages = allocation_size / PageSize;
- R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
+ R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup));
// Clear all the newly allocated pages.
for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
@@ -1034,9 +1716,10 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
if (is_map_only) {
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
- KPageLinkedList page_group;
- R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool,
- allocation_option));
+ KPageGroup page_group;
+ R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess(
+ &page_group, needed_num_pages,
+ KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
@@ -1045,11 +1728,11 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
return addr;
}
-ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1068,11 +1751,11 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
return ResultSuccess;
}
-ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
+Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
- if (const ResultCode result{CheckMemoryState(
+ if (const Result result{CheckMemoryState(
nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute,
KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None,
@@ -1091,61 +1774,24 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
return ResultSuccess;
}
-ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->ShareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) {
+ return this->LockMemoryAndOpen(
+ out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+ KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
+ KMemoryAttribute::None,
+ static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+ KMemoryPermission::KernelReadWrite),
+ KMemoryAttribute::Locked);
}
-ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
- KScopedLightLock lk(general_lock);
-
- KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
-
- KMemoryPermission old_perm{};
-
- if (const ResultCode result{CheckMemoryState(
- nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
- KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
- KMemoryAttribute::All, KMemoryAttribute::Locked)};
- result.IsError()) {
- return result;
- }
-
- new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
- block_manager->UpdateLock(
- addr, size / PageSize,
- [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
- block->UnshareToDevice(permission);
- },
- new_perm);
-
- return ResultSuccess;
+Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) {
+ return this->UnlockMemory(
+ addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
+ KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
+ KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
}
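Lock and unlock for code memory are now keyed by the page group captured at lock time, so the unlocker can verify it is releasing the same physical pages. The intended pairing, sketched with error paths elided (pt is a hypothetical KPageTable&):

KPageGroup pg;
R_TRY(pt.LockForCodeMemory(std::addressof(pg), addr, size));
// Region is now NotMapped | KernelReadWrite and carries the Locked attribute.
R_TRY(pt.UnlockForCodeMemory(addr, size, pg)); // checked via IsValidPageGroup()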
-ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
+Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<KMemoryBlockManager>(start, end);
return ResultSuccess;
@@ -1170,13 +1816,11 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
}
void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
- KPageLinkedList& page_linked_list) {
+ KPageGroup& page_linked_list) {
VAddr addr{start};
while (addr < start + (num_pages * PageSize)) {
const PAddr paddr{GetPhysicalAddr(addr)};
- if (!paddr) {
- UNREACHABLE();
- }
+ ASSERT(paddr != 0);
page_linked_list.AddBlock(paddr, 1);
addr += PageSize;
}
@@ -1191,8 +1835,8 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
IsKernel() ? 1 : 4);
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(addr, PageSize));
@@ -1207,7 +1851,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
break;
default:
- UNREACHABLE();
+ ASSERT(false);
}
addr += size;
@@ -1216,8 +1860,8 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
return ResultSuccess;
}
-ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr) {
+Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
@@ -1238,12 +1882,12 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
case OperationType::ChangePermissionsAndRefresh:
break;
default:
- UNREACHABLE();
+ ASSERT(false);
}
return ResultSuccess;
}
-constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
+VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -1275,11 +1919,10 @@ constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
return code_region_start;
default:
UNREACHABLE();
- return {};
}
}
-constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
+std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:
case KMemoryState::Kernel:
@@ -1311,7 +1954,6 @@ constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
return code_region_end - code_region_start;
default:
UNREACHABLE();
- return {};
}
}
@@ -1361,10 +2003,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co
}
}
-ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
// Validate the states match expectation.
R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory);
R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory);
@@ -1373,12 +2015,11 @@ ResultCode KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState st
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm,
- KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -1416,12 +2057,12 @@ ResultCode KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed
return ResultSuccess;
}
-ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
+ VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
ASSERT(this->IsLockedByCurrentThread());
// Get information about the first block.
@@ -1478,4 +2119,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
return ResultSuccess;
}
+Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+ // Validate basic preconditions.
+ ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+ ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+ KMemoryAttribute::None);
+
+ // Validate the lock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check that the output page group is empty, if it exists.
+ if (out_pg) {
+ ASSERT(out_pg->GetNumPages() == 0);
+ }
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Get the physical address, if we're supposed to.
+ if (out_paddr != nullptr) {
+ ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+ }
+
+ // Make the page group, if we're supposed to.
+ if (out_pg != nullptr) {
+ R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
+Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr, KMemoryPermission new_perm,
+ KMemoryAttribute lock_attr, const KPageGroup* pg) {
+ // Validate basic preconditions.
+ ASSERT((attr_mask & lock_attr) == lock_attr);
+ ASSERT((attr & lock_attr) == lock_attr);
+
+ // Validate the unlock request.
+ const size_t num_pages = size / PageSize;
+ R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Check the state.
+ KMemoryState old_state{};
+ KMemoryPermission old_perm{};
+ KMemoryAttribute old_attr{};
+ size_t num_allocator_blocks{};
+ R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+ std::addressof(old_attr), std::addressof(num_allocator_blocks),
+ addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+ state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+ attr_mask, attr));
+
+ // Check the page group.
+ if (pg != nullptr) {
+ R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), ResultInvalidMemoryRegion);
+ }
+
+ // Decide on new perm and attr.
+ new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+ KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+ // Update permission, if we need to.
+ if (new_perm != old_perm) {
+ R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+ }
+
+ // Apply the memory block updates.
+ block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+ return ResultSuccess;
+}
+
} // namespace Kernel
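LockMemoryAndOpen and UnlockMemory are deliberately symmetric over lock_attr: one ORs the bits in, the other clears them. A compressed, self-contained statement of that algebra, with a plain bitmask standing in for KMemoryAttribute:

#include <cstdint>

constexpr std::uint32_t Lock(std::uint32_t attr, std::uint32_t lock_attr) {
    return attr | lock_attr;  // LockMemoryAndOpen: set the lock bits
}
constexpr std::uint32_t Unlock(std::uint32_t attr, std::uint32_t lock_attr) {
    return attr & ~lock_attr; // UnlockMemory: clear them again
}
static_assert(Unlock(Lock(0b01u, 0b10u), 0b10u) == 0b01u, "lock/unlock round-trips");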
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index c98887d34..25774f232 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -12,6 +11,7 @@
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -25,45 +25,57 @@ class KMemoryBlockManager;
class KPageTable final {
public:
+ enum class ICacheInvalidationStrategy : u32 { InvalidateRange, InvalidateAll };
+
YUZU_NON_COPYABLE(KPageTable);
YUZU_NON_MOVEABLE(KPageTable);
explicit KPageTable(Core::System& system_);
~KPageTable();
- ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
- VAddr code_addr, std::size_t code_size,
- KMemoryManager::Pool pool);
- ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
- KMemoryPermission perm);
- ResultCode MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
- VAddr src_addr);
- ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
- ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
- ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
- KMemoryPermission perm);
- ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
- ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
- Svc::MemoryPermission svc_perm);
+ Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
+ VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
+ Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+ Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
+ ICacheInvalidationStrategy icache_invalidation_strategy);
+ Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
+ VAddr src_addr);
+ Result MapPhysicalMemory(VAddr addr, std::size_t size);
+ Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
+ Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+ Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
+ KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ KMemoryState state, KMemoryPermission perm) {
+ return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+ this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
+ state, perm);
+ }
+ Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
+ Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
+ Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
KMemoryInfo QueryInfo(VAddr addr);
- ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
- ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
- ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
- ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
- ResultCode SetMaxHeapSize(std::size_t size);
- ResultCode SetHeapSize(VAddr* out, std::size_t size);
+ Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
+ Result ResetTransferMemory(VAddr addr, std::size_t size);
+ Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
+ Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
+ Result SetMaxHeapSize(std::size_t size);
+ Result SetHeapSize(VAddr* out, std::size_t size);
ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr = 0);
- ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
- ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
- ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+ Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+ Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
+ Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
+ Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr);
Common::PageTable& PageTableImpl() {
return page_table_impl;
@@ -88,68 +100,97 @@ private:
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared;
- ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
- ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
- KMemoryPermission perm);
- ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
+ Result InitializeMemoryLayout(VAddr start, VAddr end);
+ Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
+ Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+ bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
+ KMemoryState state, KMemoryPermission perm);
+ Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
- void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
+ void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
KMemoryInfo QueryInfoImpl(VAddr addr);
VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
std::size_t align);
- ResultCode Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
- OperationType operation);
- ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
- OperationType operation, PAddr map_addr = 0);
- constexpr VAddr GetRegionAddress(KMemoryState state) const;
- constexpr std::size_t GetRegionSize(KMemoryState state) const;
-
- ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
- std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const {
+ Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
+ OperationType operation);
+ Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+ OperationType operation, PAddr map_addr = 0);
+ VAddr GetRegionAddress(KMemoryState state) const;
+ std::size_t GetRegionSize(KMemoryState state) const;
+
+ VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
+ std::size_t alignment, std::size_t offset, std::size_t guard_pages);
+
+ Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask,
+ KMemoryPermission perm, KMemoryAttribute attr_mask,
+ KMemoryAttribute attr) const {
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
perm, attr_mask, attr);
}
- ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr) const;
- ResultCode CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
- KMemoryAttribute* out_attr, std::size_t* out_blocks_needed,
- VAddr addr, std::size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
- ResultCode CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
- KMemoryState state_mask, KMemoryState state,
- KMemoryPermission perm_mask, KMemoryPermission perm,
- KMemoryAttribute attr_mask, KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
+ Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+ KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
+ std::size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+ Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
}
- ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask,
- KMemoryState state, KMemoryPermission perm_mask,
- KMemoryPermission perm, KMemoryAttribute attr_mask,
- KMemoryAttribute attr,
- KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
+ Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+ KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
attr_mask, attr, ignore_attr);
}
+ Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+ KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+ Result UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+ KMemoryPermission perm_mask, KMemoryPermission perm,
+ KMemoryAttribute attr_mask, KMemoryAttribute attr,
+ KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+ const KPageGroup* pg);
+
+ Result MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages);
+ bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);
+
bool IsLockedByCurrentThread() const {
return general_lock.IsLockedByCurrentThread();
}
+ bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+ }
+
+ bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+ ASSERT(this->IsLockedByCurrentThread());
+
+ *out = GetPhysicalAddr(virt_addr);
+
+ return *out != 0;
+ }
+
mutable KLightLock general_lock;
mutable KLightLock map_physical_memory_lock;
@@ -210,7 +251,7 @@ public:
constexpr VAddr GetAliasCodeRegionSize() const {
return alias_code_region_end - alias_code_region_start;
}
- size_t GetNormalMemorySize() {
+ std::size_t GetNormalMemorySize() {
KScopedLightLock lk(general_lock);
return GetHeapSize() + mapped_physical_memory_size;
}
@@ -253,9 +294,10 @@ public:
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
-
- PAddr GetPhysicalAddr(VAddr addr) {
- ASSERT(IsLockedByCurrentThread());
+ constexpr std::size_t GetNumGuardPages() const {
+ return IsKernel() ? 1 : 4;
+ }
+ PAddr GetPhysicalAddr(VAddr addr) const {
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
@@ -276,10 +318,6 @@ private:
return is_aslr_enabled;
}
- constexpr std::size_t GetNumGuardPages() const {
- return IsKernel() ? 1 : 4;
- }
-
constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
return (address_space_start <= addr) &&
(num_pages <= (address_space_end - address_space_start) / PageSize) &&
@@ -311,6 +349,9 @@ private:
bool is_kernel{};
bool is_aslr_enabled{};
+ u32 heap_fill_value{};
+ const KMemoryRegion* cached_physical_heap_region{};
+
KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index a8ba09c4a..7a5a9dc2a 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_port.h"
@@ -51,13 +50,18 @@ bool KPort::IsServerClosed() const {
return state == State::ServerClosed;
}
-ResultCode KPort::EnqueueSession(KServerSession* session) {
+Result KPort::EnqueueSession(KServerSession* session) {
KScopedSchedulerLock sl{kernel};
R_UNLESS(state == State::Normal, ResultPortClosed);
server.EnqueueSession(session);
- server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession());
+
+ if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
+ session_ptr->ClientConnected(server.AcceptSession());
+ } else {
+ ASSERT(false);
+ }
return ResultSuccess;
}
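EnqueueSession no longer assumes the session handler outlives the port; it promotes a std::weak_ptr and asserts if the handler is already gone. The idiom in isolation, as a self-contained sketch (Handler is a stand-in type, not the kernel's):

#include <cassert>
#include <memory>

struct Handler {
    void ClientConnected() {}
};

void Enqueue(std::weak_ptr<Handler> weak_handler) {
    if (auto handler = weak_handler.lock()) { // promote only if still alive
        handler->ClientConnected();
    } else {
        assert(false); // mirrors the kernel's ASSERT(false) path
    }
}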
diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index b6e4a1fcd..0cfc16dab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -35,7 +34,7 @@ public:
bool IsServerClosed() const;
- ResultCode EnqueueSession(KServerSession* session);
+ Result EnqueueSession(KServerSession* session);
KClientPort& GetClientPort() {
return client;
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index bd779739d..cb2512b0b 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -1,9 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 85c506979..d3e99665f 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <bitset>
@@ -13,7 +12,6 @@
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
-#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h"
@@ -24,7 +22,6 @@
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
-#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -59,76 +56,22 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
- thread->DisableDispatch();
- auto& kernel = system.Kernel();
- // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
- {
- KScopedSchedulerLock lock{kernel};
- thread->SetState(ThreadState::Runnable);
+ if (system.DebuggerEnabled()) {
+ thread->RequestSuspend(SuspendType::Debug);
}
+
+ // Run our thread.
+ void(thread->Run());
}
} // Anonymous namespace
-// Represents a page used for thread-local storage.
-//
-// Each TLS page contains slots that may be used by processes and threads.
-// Every process and thread is created with a slot in some arbitrary page
-// (whichever page happens to have an available slot).
-class TLSPage {
-public:
- static constexpr std::size_t num_slot_entries =
- Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;
-
- explicit TLSPage(VAddr address) : base_address{address} {}
-
- bool HasAvailableSlots() const {
- return !is_slot_used.all();
- }
-
- VAddr GetBaseAddress() const {
- return base_address;
- }
-
- std::optional<VAddr> ReserveSlot() {
- for (std::size_t i = 0; i < is_slot_used.size(); i++) {
- if (is_slot_used[i]) {
- continue;
- }
-
- is_slot_used[i] = true;
- return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
- }
-
- return std::nullopt;
- }
-
- void ReleaseSlot(VAddr address) {
- // Ensure that all given addresses are consistent with how TLS pages
- // are intended to be used when releasing slots.
- ASSERT(IsWithinPage(address));
- ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
-
- const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
- is_slot_used[index] = false;
- }
-
-private:
- bool IsWithinPage(VAddr address) const {
- return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
- }
-
- VAddr base_address;
- std::bitset<num_slot_entries> is_slot_used;
-};
-
-ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type) {
+Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
process->name = std::move(process_name);
-
- process->resource_limit = kernel.GetSystemResourceLimit();
+ process->resource_limit = res_limit;
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -143,9 +86,6 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
kernel.AppendNewProcess(process);
- // Open a reference to the resource limit.
- process->resource_limit->Open();
-
// Clear remaining fields.
process->num_running_threads = 0;
process->is_signaled = false;
@@ -153,6 +93,9 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
process->is_suspended = false;
process->schedule_count = 0;
+ // Open a reference to the resource limit.
+ process->resource_limit->Open();
+
return ResultSuccess;
}
@@ -217,7 +160,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
std::addressof(num_waiters),
reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
next != nullptr) {
- next->SetState(ThreadState::Runnable);
+ next->EndWait(ResultSuccess);
}
KScheduler::SetSchedulerUpdateNeeded(kernel);
@@ -232,7 +175,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -249,7 +193,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -273,8 +218,8 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
- [[maybe_unused]] size_t size) {
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+ [[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
@@ -326,15 +271,19 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
shmem->Close();
}
-void KProcess::RegisterThread(const KThread* thread) {
+void KProcess::RegisterThread(KThread* thread) {
+ KScopedLightLock lk{list_lock};
+
thread_list.push_back(thread);
}
-void KProcess::UnregisterThread(const KThread* thread) {
+void KProcess::UnregisterThread(KThread* thread) {
+ KScopedLightLock lk{list_lock};
+
thread_list.remove(thread);
}
-ResultCode KProcess::Reset() {
+Result KProcess::Reset() {
// Lock the process and the scheduler.
KScopedLightLock lk(state_lock);
KScopedSchedulerLock sl{kernel};
@@ -348,8 +297,51 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
- std::size_t code_size) {
+Result KProcess::SetActivity(ProcessActivity activity) {
+ // Lock ourselves and the scheduler.
+ KScopedLightLock lk{state_lock};
+ KScopedLightLock list_lk{list_lock};
+ KScopedSchedulerLock sl{kernel};
+
+ // Validate our state.
+ R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
+ R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == ProcessActivity::Paused) {
+ // Verify that we're not suspended.
+ if (is_suspended) {
+ return ResultInvalidState;
+ }
+
+ // Suspend all threads.
+ for (auto* thread : GetThreadList()) {
+ thread->RequestSuspend(SuspendType::Process);
+ }
+
+ // Set ourselves as suspended.
+ SetSuspended(true);
+ } else {
+ ASSERT(activity == ProcessActivity::Runnable);
+
+ // Verify that we're suspended.
+ if (!is_suspended) {
+ return ResultInvalidState;
+ }
+
+ // Resume all threads.
+ for (auto* thread : GetThreadList()) {
+ thread->Resume(SuspendType::Process);
+ }
+
+ // Set ourselves as resumed.
+ SetSuspended(false);
+ }
+
+ return ResultSuccess;
+}
+
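SetActivity toggles a whole process between Paused and Runnable by walking its thread list under the scheduler lock. A hedged sketch of a caller (the helper and its error handling are illustrative):

// Hypothetical debugger-style helper: freeze a process, inspect it, thaw it.
Result WithProcessPaused(KProcess& process) {
    R_TRY(process.SetActivity(ProcessActivity::Paused));   // suspends every thread
    // ... inspect memory/threads while they hold SuspendType::Process ...
    R_TRY(process.SetActivity(ProcessActivity::Runnable)); // resumes them
    return ResultSuccess;
}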
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
@@ -364,24 +356,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
return ResultLimitReached;
}
    // Initialize process address space
- if (const ResultCode result{
- page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
- code_size, KMemoryManager::Pool::Application)};
+ if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+ 0x8000000, code_size,
+ KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}
// Map process code region
- if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
return result;
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
- if (const ResultCode result{
+ if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
@@ -401,11 +393,11 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
break;
default:
- UNREACHABLE();
+ ASSERT(false);
}
// Create TLS region
- tls_region_address = CreateTLSRegion();
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
memory_reservation.Commit();
return handle_table.Initialize(capabilities.GetHandleTableSize());
@@ -428,11 +420,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto& thread : in_thread_list) {
+ for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+ if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -445,7 +437,7 @@ void KProcess::PrepareForTermination() {
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
- FreeTLSRegion(tls_region_address);
+ this->DeleteThreadLocalRegion(tls_region_address);
tls_region_address = 0;
if (resource_limit) {
@@ -457,9 +449,6 @@ void KProcess::PrepareForTermination() {
}
void KProcess::Finalize() {
- // Finalize the handle table and close any open handles.
- handle_table.Finalize();
-
// Free all shared memory infos.
{
auto it = shared_memory_list.begin();
@@ -484,67 +473,156 @@ void KProcess::Finalize() {
resource_limit = nullptr;
}
+ // Finalize the page table.
+ page_table.reset();
+
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
-/**
- * Attempts to find a TLS page that contains a free slot for
- * use by a thread.
- *
- * @returns If a page with an available slot is found, then an iterator
- * pointing to the page is returned. Otherwise the end iterator
- * is returned instead.
- */
-static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
- return std::find_if(tls_pages.begin(), tls_pages.end(),
- [](const auto& page) { return page.HasAvailableSlots(); });
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
+ KThreadLocalPage* tlp = nullptr;
+ VAddr tlr = 0;
+
+ // See if we can get a region from a partially used TLP.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+ tlr = it->Reserve();
+ ASSERT(tlr != 0);
+
+ if (it->IsAllUsed()) {
+ tlp = std::addressof(*it);
+ partially_used_tlp_tree.erase(it);
+ fully_used_tlp_tree.insert(*tlp);
+ }
+
+ *out = tlr;
+ return ResultSuccess;
+ }
+ }
+
+ // Allocate a new page.
+ tlp = KThreadLocalPage::Allocate(kernel);
+ R_UNLESS(tlp != nullptr, ResultOutOfMemory);
+ auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+
+ // Initialize the new page.
+ R_TRY(tlp->Initialize(kernel, this));
+
+ // Reserve a TLR.
+ tlr = tlp->Reserve();
+ ASSERT(tlr != 0);
+
+ // Insert into our tree.
+ {
+ KScopedSchedulerLock sl{kernel};
+ if (tlp->IsAllUsed()) {
+ fully_used_tlp_tree.insert(*tlp);
+ } else {
+ partially_used_tlp_tree.insert(*tlp);
+ }
+ }
+
+ // We succeeded!
+ tlp_guard.Cancel();
+ *out = tlr;
+ return ResultSuccess;
}
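Allocation keeps pages with spare slots in partially_used_tlp_tree and exhausted ones in fully_used_tlp_tree, so reserving a region never scans full pages. A toy, self-contained model of that two-set migration using std::set (all names here are illustrative):

#include <cstddef>
#include <optional>
#include <set>

struct Page {
    std::size_t base;
    std::size_t used = 0;
    static constexpr std::size_t slots = 8;
    bool operator<(const Page& o) const { return base < o.base; }
};

std::optional<std::size_t> Reserve(std::set<Page>& partial, std::set<Page>& full) {
    if (partial.empty()) {
        return std::nullopt; // caller allocates a fresh page, as above
    }
    Page page = *partial.begin();
    partial.erase(partial.begin());
    const std::size_t slot = page.used++;
    (page.used == Page::slots ? full : partial).insert(page); // migrate if exhausted
    return page.base + slot; // the reserved "TLR"
}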
-VAddr KProcess::CreateTLSRegion() {
- KScopedSchedulerLock lock(kernel);
- if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
- tls_page_iter != tls_pages.cend()) {
- return *tls_page_iter->ReserveSlot();
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
+ KThreadLocalPage* page_to_free = nullptr;
+
+ // Release the region.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Try to find the page in the partially used list.
+ auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ if (it == partially_used_tlp_tree.end()) {
+ // If we don't find it, it has to be in the fully used list.
+ it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
+
+ // Release the region.
+ it->Release(addr);
+
+ // Move the page out of the fully used list.
+ KThreadLocalPage* tlp = std::addressof(*it);
+ fully_used_tlp_tree.erase(it);
+ if (tlp->IsAllFree()) {
+ page_to_free = tlp;
+ } else {
+ partially_used_tlp_tree.insert(*tlp);
+ }
+ } else {
+ // Release the region.
+ it->Release(addr);
+
+ // Handle the all-free case.
+ KThreadLocalPage* tlp = std::addressof(*it);
+ if (tlp->IsAllFree()) {
+ partially_used_tlp_tree.erase(it);
+ page_to_free = tlp;
+ }
+ }
+ }
+
+ // If we should free the page it was in, do so.
+ if (page_to_free != nullptr) {
+ page_to_free->Finalize();
+
+ KThreadLocalPage::Free(kernel, page_to_free);
}
- Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
- ASSERT(tls_page_ptr);
+ return ResultSuccess;
+}
- const VAddr start{page_table->GetKernelMapRegionStart()};
- const VAddr size{page_table->GetKernelMapRegionEnd() - start};
- const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
- const VAddr tls_page_addr{page_table
- ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
- KMemoryState::ThreadLocal,
- KMemoryPermission::UserReadWrite,
- tls_map_addr)
- .ValueOr(0)};
+bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.type == DebugWatchpointType::None;
+ })};
- ASSERT(tls_page_addr);
+ if (watch == watchpoints.end()) {
+ return false;
+ }
- std::memset(tls_page_ptr, 0, PageSize);
- tls_pages.emplace_back(tls_page_addr);
+ watch->start_address = addr;
+ watch->end_address = addr + size;
+ watch->type = type;
- const auto reserve_result{tls_pages.back().ReserveSlot()};
- ASSERT(reserve_result.has_value());
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]++;
+ system.Memory().MarkRegionDebug(page, PageSize, true);
+ }
- return *reserve_result;
+ return true;
}
-void KProcess::FreeTLSRegion(VAddr tls_address) {
- KScopedSchedulerLock lock(kernel);
- const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
- auto iter =
- std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
- return page.GetBaseAddress() == aligned_address;
- });
+bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+ })};
- // Something has gone very wrong if we're freeing a region
- // with no actual page available.
- ASSERT(iter != tls_pages.cend());
+ if (watch == watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = 0;
+ watch->end_address = 0;
+ watch->type = DebugWatchpointType::None;
+
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]--;
+ if (!debug_page_refcounts[page]) {
+ system.Memory().MarkRegionDebug(page, PageSize, false);
+ }
+ }
- iter->ReleaseSlot(tls_address);
+ return true;
}
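Watchpoint pages are reference-counted so two overlapping watchpoints don't unmark each other's pages. The counting rule, modeled in a few self-contained lines (the map and function names are illustrative; the kernel calls Memory().MarkRegionDebug() at these points):

#include <cstdint>
#include <map>

std::map<std::uint64_t, int> refcounts; // mirrors debug_page_refcounts

void MarkPage(std::uint64_t page) {
    ++refcounts[page];          // InsertWatchpoint: always (re)marks the page
}
void UnmarkPage(std::uint64_t page) {
    if (--refcounts[page] == 0) {
        // RemoveWatchpoint: only the last overlapping watchpoint unmarks
    }
}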
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
@@ -567,9 +645,10 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_},
- page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_},
- address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {}
+ : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
+ kernel_.System())},
+ handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
+ state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;
@@ -583,7 +662,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable();
}
-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);
// The kernel always ensures that the given stack size is page aligned.
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 38b446350..d56d73bab 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -1,20 +1,20 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include <list>
+#include <map>
#include <string>
-#include <vector>
#include "common/common_types.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_worker_task.h"
#include "core/hle/kernel/process_capability.h"
#include "core/hle/kernel/slab_helpers.h"
@@ -63,6 +63,25 @@ enum class ProcessStatus {
DebugBreak,
};
+enum class ProcessActivity : u32 {
+ Runnable,
+ Paused,
+};
+
+enum class DebugWatchpointType : u8 {
+ None = 0,
+ Read = 1 << 0,
+ Write = 1 << 1,
+ ReadOrWrite = Read | Write,
+};
+DECLARE_ENUM_FLAG_OPERATORS(DebugWatchpointType);
+
+struct DebugWatchpoint {
+ VAddr start_address;
+ VAddr end_address;
+ DebugWatchpointType type;
+};
+
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
@@ -90,8 +109,8 @@ public:
static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4;
- static ResultCode Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type);
+ static Result Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit);
/// Gets a reference to the process' page table.
KPageTable& PageTable() {
@@ -113,11 +132,11 @@ public:
return handle_table;
}
- ResultCode SignalToAddress(VAddr address) {
+ Result SignalToAddress(VAddr address) {
return condition_var.SignalToAddress(address);
}
- ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+ Result WaitForAddress(Handle handle, VAddr address, u32 tag) {
return condition_var.WaitForAddress(handle, address, tag);
}
@@ -125,17 +144,16 @@ public:
return condition_var.Signal(cv_key, count);
}
- ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+ Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
return condition_var.Wait(address, cv_key, tag, ns);
}
- ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
- s32 count) {
+ Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
return address_arbiter.SignalToAddress(address, signal_type, value, count);
}
- ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
- s64 timeout) {
+ Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+ s64 timeout) {
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
}
@@ -282,17 +300,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
- const std::list<const KThread*>& GetThreadList() const {
+ std::list<KThread*>& GetThreadList() {
return thread_list;
}
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
- void RegisterThread(const KThread* thread);
+ void RegisterThread(KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
- void UnregisterThread(const KThread* thread);
+ void UnregisterThread(KThread* thread);
/// Clears the signaled state of the process if and only if it's signaled.
///
@@ -302,7 +320,7 @@ public:
/// @pre The process must be in a signaled state. If this is called on a
/// process instance that is not signaled, ERR_INVALID_STATE will be
/// returned.
- ResultCode Reset();
+ Result Reset();
/**
* Loads process-specifics configuration info with metadata provided
@@ -313,7 +331,7 @@ public:
* @returns ResultSuccess if all relevant metadata was able to be
* loaded and parsed. Otherwise, an error code is returned.
*/
- ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
+ Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
/**
* Starts the main application thread for this process.
@@ -347,6 +365,8 @@ public:
void DoWorkerTaskImpl();
+ Result SetActivity(ProcessActivity activity);
+
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
@@ -355,17 +375,30 @@ public:
return state_lock;
}
- ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
+ Result AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size);
///////////////////////////////////////////////////////////////////////////////////////////////
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- [[nodiscard]] VAddr CreateTLSRegion();
+ [[nodiscard]] Result CreateThreadLocalRegion(VAddr* out);
// Frees a used TLS slot identified by the given address
- void FreeTLSRegion(VAddr tls_address);
+ Result DeleteThreadLocalRegion(VAddr addr);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////
+ // Debug watchpoint management
+
+ // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
+ bool InsertWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+ // Attempts to remove the watchpoint specified by the given parameters.
+ bool RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, DebugWatchpointType type);
+
+ const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
+ return watchpoints;
+ }
private:
void PinThread(s32 core_id, KThread* thread) {
@@ -388,7 +421,7 @@ private:
void ChangeStatus(ProcessStatus new_status);
/// Allocates the main thread stack for the process, given the stack size in bytes.
- ResultCode AllocateMainThreadStack(std::size_t stack_size);
+ Result AllocateMainThreadStack(std::size_t stack_size);
/// Memory manager for this process
std::unique_ptr<KPageTable> page_table;
@@ -413,13 +446,6 @@ private:
/// The ideal CPU core for this process, threads are scheduled on this core by default.
u8 ideal_core = 0;
- /// The Thread Local Storage area is allocated as processes create threads,
- /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
- /// holds the TLS for a specific thread. This vector contains which parts are in use for each
- /// page as a bitmask.
- /// This vector will grow as more pages are allocated for new threads.
- std::vector<TLSPage> tls_pages;
-
/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;
@@ -429,7 +455,7 @@ private:
bool is_64bit_process = true;
/// Total running time for the process in ticks.
- u64 total_process_running_time_ticks = 0;
+ std::atomic<u64> total_process_running_time_ticks = 0;
/// Per-process handle table for storing created object handles in.
KHandleTable handle_table;
@@ -449,7 +475,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
- std::list<const KThread*> thread_list;
+ std::list<KThread*> thread_list;
/// List of shared memory that are running with this process as their owner.
std::list<KSharedMemoryInfo*> shared_memory_list;
@@ -478,10 +504,19 @@ private:
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+ std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> watchpoints{};
+ std::map<VAddr, u64> debug_page_refcounts;
KThread* exception_thread{};
KLightLock state_lock;
+ KLightLock list_lock;
+
+ using TLPTree =
+ Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
+ using TLPIterator = TLPTree::iterator;
+ TLPTree fully_used_tlp_tree;
+ TLPTree partially_used_tlp_tree;
};
} // namespace Kernel
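DebugWatchpointType is consumed as a bit-flag set, which is why the declaration is followed by DECLARE_ENUM_FLAG_OPERATORS. yuzu's real macro lives in common/, so the following hand-rolled operators are only a sketch of what it is expected to generate for a scoped enum:

#include <cstdint>
#include <type_traits>

enum class WatchpointType : std::uint8_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    ReadOrWrite = Read | Write,
};

constexpr WatchpointType operator|(WatchpointType a, WatchpointType b) {
    using U = std::underlying_type_t<WatchpointType>;
    return static_cast<WatchpointType>(static_cast<U>(a) | static_cast<U>(b));
}

constexpr WatchpointType operator&(WatchpointType a, WatchpointType b) {
    using U = std::underlying_type_t<WatchpointType>;
    return static_cast<WatchpointType>(static_cast<U>(a) & static_cast<U>(b));
}

// Usage: test whether a watchpoint traps writes.
constexpr bool TrapsWrites(WatchpointType t) {
    return (t & WatchpointType::Write) != WatchpointType::None;
}

static_assert(TrapsWrites(WatchpointType::ReadOrWrite));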
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index bf1db10d4..94c5464fe 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/hle/kernel/k_event.h"
@@ -28,7 +27,7 @@ void KReadableEvent::Destroy() {
}
}
-ResultCode KReadableEvent::Signal() {
+Result KReadableEvent::Signal() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
@@ -39,13 +38,13 @@ ResultCode KReadableEvent::Signal() {
return ResultSuccess;
}
-ResultCode KReadableEvent::Clear() {
+Result KReadableEvent::Clear() {
Reset();
return ResultSuccess;
}
-ResultCode KReadableEvent::Reset() {
+Result KReadableEvent::Reset() {
KScopedSchedulerLock lk{kernel};
if (!is_signaled) {
diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 149fa78dd..18dcad289 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -34,9 +33,9 @@ public:
bool IsSignaled() const override;
void Destroy() override;
- ResultCode Signal();
- ResultCode Clear();
- ResultCode Reset();
+ Result Signal();
+ Result Clear();
+ Result Reset();
private:
bool is_signaled{};
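The readable-event methods above form a small state machine over a single is_signaled flag guarded by the scheduler lock: Signal sets it (and would wake waiters), Clear drops it unconditionally, and Reset only succeeds when the event is currently signaled. A toy model of that contract; the exact error code returned by a failed Reset is an assumption, since the hunk cuts off inside the guard:

enum class ToyResult { Success, InvalidState };

class ToyEvent {
public:
    ToyResult Signal() {
        if (!is_signaled) {
            is_signaled = true;
            // The real kernel would also wake any threads waiting on the event.
        }
        return ToyResult::Success;
    }

    ToyResult Clear() {
        is_signaled = false;
        return ToyResult::Success;
    }

    ToyResult Reset() {
        if (!is_signaled) {
            return ToyResult::InvalidState; // assumed error path; see lead-in
        }
        is_signaled = false;
        return ToyResult::Success;
    }

private:
    bool is_signaled{};
};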
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 0c4bba66b..010dcf99e 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -1,8 +1,8 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
+#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/svc_results.h"
@@ -73,7 +73,7 @@ s64 KResourceLimit::GetFreeValue(LimitableResource which) const {
return value;
}
-ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
+Result KResourceLimit::SetLimitValue(LimitableResource which, s64 value) {
const auto index = static_cast<std::size_t>(which);
KScopedLightLock lk(lock);
R_UNLESS(current_values[index] <= value, ResultInvalidState);
@@ -151,4 +151,22 @@ void KResourceLimit::Release(LimitableResource which, s64 value, s64 hint) {
}
}
+KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size) {
+ auto* resource_limit = KResourceLimit::Create(system.Kernel());
+ resource_limit->Initialize(&system.CoreTiming());
+
+ // Initialize default resource limit values.
+ // TODO(bunnei): These values are the system defaults; the limits for service processes are
+ // lower. These should use the correct limit values.
+
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size)
+ .IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess());
+ ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess());
+
+ return resource_limit;
+}
+
} // namespace Kernel
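CreateResourceLimitForProcess above seeds a per-process limit with system-default caps; consumers are then expected to Reserve against the limit before creating a kernel object and Release when it is destroyed. A toy model of that discipline (a stand-in type, not the emulator's KResourceLimit):

#include <cstdint>

class ToyLimit {
public:
    explicit ToyLimit(std::int64_t limit) : m_limit{limit} {}

    // Creation paths reserve first; a failed reservation means creation fails.
    bool Reserve(std::int64_t value) {
        if (m_current + value > m_limit) {
            return false;
        }
        m_current += value;
        return true;
    }

    // Teardown paths return what they reserved.
    void Release(std::int64_t value) {
        m_current -= value;
    }

private:
    std::int64_t m_limit;
    std::int64_t m_current{0};
};

int main() {
    ToyLimit threads{800}; // mirrors the Threads cap set above
    if (threads.Reserve(1)) {
        // ... create the thread object; later, on destruction:
        threads.Release(1);
    }
}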
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h
index fab6005ff..65c98c979 100644
--- a/src/core/hle/kernel/k_resource_limit.h
+++ b/src/core/hle/kernel/k_resource_limit.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -9,7 +8,7 @@
#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"
-union ResultCode;
+union Result;
namespace Core::Timing {
class CoreTiming;
@@ -47,7 +46,7 @@ public:
s64 GetPeakValue(LimitableResource which) const;
s64 GetFreeValue(LimitableResource which) const;
- ResultCode SetLimitValue(LimitableResource which, s64 value);
+ Result SetLimitValue(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value);
bool Reserve(LimitableResource which, s64 value, s64 timeout);
@@ -67,4 +66,7 @@ private:
KLightConditionVariable cond_var;
const Core::Timing::CoreTiming* core_timing{};
};
+
+KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical_memory_size);
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c96520828..c34ce7a17 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -1,9 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <bit>
@@ -22,7 +18,6 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/time_manager.h"
namespace Kernel {
@@ -32,69 +27,185 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
}
}
-void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
- auto scheduler = kernel.CurrentScheduler();
+KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+ m_switch_fiber = std::make_shared<Common::Fiber>([this] {
+ while (true) {
+ ScheduleImplFiber();
+ }
+ });
+
+ m_state.needs_scheduling = true;
+}
+
+KScheduler::~KScheduler() = default;
- u32 current_core{0xF};
- bool must_context_switch{};
- if (scheduler) {
- current_core = scheduler->core_id;
- // TODO(bunnei): Should be set to true when we deprecate single core
- must_context_switch = !kernel.IsPhantomModeForSingleCore();
+void KScheduler::SetInterruptTaskRunnable() {
+ m_state.interrupt_task_runnable = true;
+ m_state.needs_scheduling = true;
+}
+
+void KScheduler::RequestScheduleOnInterrupt() {
+ m_state.needs_scheduling = true;
+
+ if (CanSchedule(kernel)) {
+ ScheduleOnInterrupt();
}
+}
- while (cores_pending_reschedule != 0) {
- const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
- ASSERT(core < Core::Hardware::NUM_CPU_CORES);
- if (!must_context_switch || core != current_core) {
- auto& phys_core = kernel.PhysicalCore(core);
- phys_core.Interrupt();
- }
- cores_pending_reschedule &= ~(1ULL << core);
+void KScheduler::DisableScheduling(KernelCore& kernel) {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
+ GetCurrentThread(kernel).DisableDispatch();
+}
+
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
+
+ auto* scheduler{kernel.CurrentScheduler()};
+
+ if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
+ KScheduler::RescheduleCores(kernel, cores_needing_scheduling);
+ KScheduler::RescheduleCurrentHLEThread(kernel);
+ return;
}
- for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
- if (kernel.PhysicalCore(core_id).IsInterrupted()) {
- KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
- }
+ scheduler->RescheduleOtherCores(cores_needing_scheduling);
+
+ if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) {
+ GetCurrentThread(kernel).EnableDispatch();
+ } else {
+ scheduler->RescheduleCurrentCore();
}
+}
+
+void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
+ // HACK: we cannot schedule from this thread, it is not a core thread
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ // Special case to ensure that dummy threads which are waiting actually block
+ GetCurrentThread(kernel).IfDummyThreadTryWait();
- if (must_context_switch) {
- auto core_scheduler = kernel.CurrentScheduler();
- kernel.ExitSVCProfile();
- core_scheduler->RescheduleCurrentCore();
- kernel.EnterSVCProfile();
+ ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+ if (IsSchedulerUpdateNeeded(kernel)) {
+ return UpdateHighestPriorityThreadsImpl(kernel);
+ } else {
+ return 0;
}
}
+void KScheduler::Schedule() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+ ASSERT(m_core_id == GetCurrentCoreId(kernel));
+
+ ScheduleImpl();
+}
+
+void KScheduler::ScheduleOnInterrupt() {
+ GetCurrentThread(kernel).DisableDispatch();
+ Schedule();
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+void KScheduler::PreemptSingleCore() {
+ GetCurrentThread(kernel).DisableDispatch();
+
+ auto* thread = GetCurrentThreadPointer(kernel);
+ auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+ previous_scheduler.Unload(thread);
+
+ Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
+
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
+void KScheduler::RescheduleCurrentCore() {
+ ASSERT(!kernel.IsPhantomModeForSingleCore());
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ GetCurrentThread(kernel).EnableDispatch();
+
+ if (m_state.needs_scheduling.load()) {
+ // Disable interrupts, and then check again if rescheduling is needed.
+ // KScopedInterruptDisable intr_disable;
+
+ kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+ }
+}
+
+void KScheduler::RescheduleCurrentCoreImpl() {
+ // Check that scheduling is needed.
+ if (m_state.needs_scheduling.load()) [[likely]] {
+ GetCurrentThread(kernel).DisableDispatch();
+ Schedule();
+ GetCurrentThread(kernel).EnableDispatch();
+ }
+}
+
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
+ // Set core ID/idle thread/interrupt task manager.
+ m_core_id = core_id;
+ m_idle_thread = idle_thread;
+ // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
+ // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
+
+ // Insert the main thread into the priority queue.
+ // {
+ // KScopedSchedulerLock lk{kernel};
+ // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
+ // SetSchedulerUpdateNeeded(kernel);
+ // }
+
+ // Bind interrupt handler.
+ // kernel.GetInterruptManager().BindHandler(
+ // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+ // KInterruptController::PriorityLevel::Scheduler, false, false);
+
+ // Set the current thread.
+ m_current_thread = main_thread;
+}
+
+void KScheduler::Activate() {
+ ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+
+ // m_state.should_count_idle = KTargetSystem::IsDebugMode();
+ m_is_active = true;
+ RescheduleCurrentCore();
+}
+
+void KScheduler::OnThreadStart() {
+ GetCurrentThread(kernel).EnableDispatch();
+}
+
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
- KScopedSpinLock lk{guard};
- if (KThread* prev_highest_thread = state.highest_priority_thread;
- prev_highest_thread != highest_thread) {
- if (prev_highest_thread != nullptr) {
+ if (KThread* prev_highest_thread = m_state.highest_priority_thread;
+ prev_highest_thread != highest_thread) [[likely]] {
+ if (prev_highest_thread != nullptr) [[likely]] {
IncrementScheduledCount(prev_highest_thread);
- prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
+ prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
}
- if (state.should_count_idle) {
- if (highest_thread != nullptr) {
+ if (m_state.should_count_idle) {
+ if (highest_thread != nullptr) [[likely]] {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
- process->SetRunningThread(core_id, highest_thread, state.idle_count);
+ process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
}
} else {
- state.idle_count++;
+ m_state.idle_count++;
}
}
- state.highest_priority_thread = highest_thread;
- state.needs_scheduling.store(true);
- return (1ULL << core_id);
+ m_state.highest_priority_thread = highest_thread;
+ m_state.needs_scheduling = true;
+ return (1ULL << m_core_id);
} else {
return 0;
}
}
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Clear that we need to update.
ClearSchedulerUpdateNeeded(kernel);
@@ -103,18 +214,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
- /// We want to go over all cores, finding the highest priority thread and determining if
- /// scheduling is needed for that core.
+ // We want to go over all cores, finding the highest priority thread and determining if
+ // scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
- // If the thread has no waiters, we need to check if the process has a thread pinned.
- if (top_thread->GetNumKernelWaiters() == 0) {
- if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
- if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
- pinned != nullptr && pinned != top_thread) {
- // We prefer our parent's pinned thread if possible. However, we also don't
- // want to schedule un-runnable threads.
+ // We need to check if the thread's process has a pinned thread.
+ if (KProcess* parent = top_thread->GetOwnerProcess()) {
+ // Check that there's a pinned thread other than the current top thread.
+ if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+ pinned != nullptr && pinned != top_thread) {
+ // We need to prefer threads with kernel waiters to the pinned thread.
+ if (top_thread->GetNumKernelWaiters() ==
+ 0 /* && top_thread != parent->GetExceptionThread() */) {
+ // If the pinned thread is runnable, use it.
if (pinned->GetRawState() == ThreadState::Runnable) {
top_thread = pinned;
} else {
@@ -134,7 +247,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
- const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
+ const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores));
+
if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
@@ -155,7 +269,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// The suggested thread isn't bound to its core, so we can migrate it!
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
-
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
@@ -188,7 +301,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Perform the migration.
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
-
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(
@@ -205,24 +317,210 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}
+void KScheduler::SwitchThread(KThread* next_thread) {
+ KProcess* const cur_process = kernel.CurrentProcess();
+ KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+
+ // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+ if (next_thread == nullptr) {
+ next_thread = m_idle_thread;
+ }
+
+ if (next_thread->GetCurrentCore() != m_core_id) {
+ next_thread->SetCurrentCore(m_core_id);
+ }
+
+ // If we're not actually switching thread, there's nothing to do.
+ if (next_thread == cur_thread) {
+ return;
+ }
+
+ // Next thread is now known not to be nullptr, and must not be dispatchable.
+ ASSERT(next_thread->GetDisableDispatchCount() == 1);
+ ASSERT(!next_thread->IsDummyThread());
+
+ // Update the CPU time tracking variables.
+ const s64 prev_tick = m_last_context_switch_time;
+ const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+ const s64 tick_diff = cur_tick - prev_tick;
+ cur_thread->AddCpuTime(m_core_id, tick_diff);
+ if (cur_process != nullptr) {
+ cur_process->UpdateCPUTimeTicks(tick_diff);
+ }
+ m_last_context_switch_time = cur_tick;
+
+ // Update our previous thread.
+ if (cur_process != nullptr) {
+ if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id)
+ [[likely]] {
+ m_state.prev_thread = cur_thread;
+ } else {
+ m_state.prev_thread = nullptr;
+ }
+ }
+
+ // Switch the current process, if we're switching processes.
+ // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
+ // KProcess::Switch(cur_process, next_process);
+ // }
+
+ // Set the new thread.
+ SetCurrentThread(kernel, next_thread);
+ m_current_thread = next_thread;
+
+ // Set the new Thread Local region.
+ // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
+}
+
+void KScheduler::ScheduleImpl() {
+ // First, clear the needs scheduling bool.
+ m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+
+ // Load the appropriate thread pointers for scheduling.
+ KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+ KThread* highest_priority_thread{m_state.highest_priority_thread};
+
+ // Check whether there are runnable interrupt tasks.
+ if (m_state.interrupt_task_runnable) {
+ // The interrupt task is runnable.
+ // We want to switch to the interrupt task/idle thread.
+ highest_priority_thread = nullptr;
+ }
+
+ // If there aren't, we want to check if the highest priority thread is the same as the current
+ // thread.
+ if (highest_priority_thread == cur_thread) {
+ // If they're the same, then we can just return.
+ return;
+ }
+
+ // The highest priority thread is not the same as the current thread.
+ // Jump to the switcher and continue executing from there.
+ m_switch_cur_thread = cur_thread;
+ m_switch_highest_priority_thread = highest_priority_thread;
+ m_switch_from_schedule = true;
+ Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
+
+ // Returning from ScheduleImpl occurs after this thread has been scheduled again.
+}
+
+void KScheduler::ScheduleImplFiber() {
+ KThread* const cur_thread{m_switch_cur_thread};
+ KThread* highest_priority_thread{m_switch_highest_priority_thread};
+
+ // If we're not coming from scheduling (i.e., we came from single-core preemption),
+ // we should restart the scheduling loop directly. This is not accurate to HOS.
+ if (!m_switch_from_schedule) {
+ goto retry;
+ }
+
+ // Mark that we are not coming from scheduling anymore.
+ m_switch_from_schedule = false;
+
+ // Save the original thread context.
+ Unload(cur_thread);
+
+ // The current thread's context has been entirely taken care of.
+ // Now we want to loop until we successfully switch the thread context.
+ while (true) {
+ // We're starting to try to do the context switch.
+ // Check if the highest priority thread is null.
+ if (!highest_priority_thread) {
+ // The next thread is nullptr!
+
+ // Switch to the idle thread. Note: HOS treats idling as a special case for
+ // performance. This is not *required* for yuzu's purposes, and for singlecore
+ // compatibility, we can just move the logic that would go here into the execution
+ // of the idle thread. If we ever remove singlecore, we should implement this
+ // accurately to HOS.
+ highest_priority_thread = m_idle_thread;
+ }
+
+ // We want to try to lock the highest priority thread's context.
+ // Try to take it.
+ while (!highest_priority_thread->context_guard.try_lock()) {
+ // The highest priority thread's context is already locked.
+ // Check if we need scheduling. If we don't, we can retry directly.
+ if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+ // If we do, another core is interfering, and we must start again.
+ goto retry;
+ }
+ }
+
+ // It's time to switch the thread.
+ // Switch to the highest priority thread.
+ SwitchThread(highest_priority_thread);
+
+ // Check if we need scheduling. If we do, then we can't complete the switch and should
+ // retry.
+ if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+ // Our switch failed.
+ // We should unlock the thread context, and then retry.
+ highest_priority_thread->context_guard.unlock();
+ goto retry;
+ } else {
+ break;
+ }
+
+ retry:
+
+ // We failed to complete the context switch, and need to retry.
+ // Clear needs_scheduling.
+ m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+
+ // Refresh the highest priority thread.
+ highest_priority_thread = m_state.highest_priority_thread;
+ }
+
+ // Reload the guest thread context.
+ Reload(highest_priority_thread);
+
+ // Reload the host thread.
+ Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+}
+
+void KScheduler::Unload(KThread* thread) {
+ auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ cpu_core.SaveContext(thread->GetContext32());
+ cpu_core.SaveContext(thread->GetContext64());
+ // Save the TPIDR_EL0 system register in case it was modified.
+ thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+ cpu_core.ClearExclusiveState();
+
+ // Check if the thread is terminated by checking the DPC flags.
+ if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+ // The thread isn't terminated, so we want to unlock it.
+ thread->context_guard.unlock();
+ }
+}
+
+void KScheduler::Reload(KThread* thread) {
+ auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+ cpu_core.LoadContext(thread->GetContext32());
+ cpu_core.LoadContext(thread->GetContext64());
+ cpu_core.SetTlsAddress(thread->GetTLSAddress());
+ cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+ cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+ cpu_core.ClearExclusiveState();
+}
+
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
// Get an atomic reference to the core scheduler's previous thread.
- std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
- static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+ auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread};
// Atomically clear the previous thread if it's our target.
KThread* compare = thread;
- prev_thread.compare_exchange_strong(compare, nullptr);
+ prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst);
}
}
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Check if the state has changed, because if it hasn't there's nothing to do.
- const auto cur_state = thread->GetRawState();
+ const ThreadState cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
@@ -242,12 +540,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
}
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// If the thread is runnable, we want to change its priority in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(old_priority,
- thread == kernel.GetCurrentEmuThread(), thread);
+ thread == GetCurrentThreadPointer(kernel), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
}
@@ -255,7 +553,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// If the thread is runnable, we want to change its affinity in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
@@ -265,15 +563,14 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread
}
}
-void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
- ASSERT(system.GlobalSchedulerContext().IsLocked());
+void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) {
+ ASSERT(IsSchedulerLockedByCurrentThread(kernel));
// Get a reference to the priority queue.
- auto& kernel = system.Kernel();
auto& priority_queue = GetPriorityQueue(kernel);
// Rotate the front of the queue to the end.
- KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority);
+ KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
@@ -285,7 +582,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
// While we have a suggested thread, try to migrate it!
{
- KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
@@ -306,7 +603,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
// to the front of the queue.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
- suggested->SetActiveCore(cpu_core_id);
+ suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -314,22 +611,21 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
}
// Get the next suggestion.
- suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested);
+ suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
}
}
// Now that we might have migrated a thread with the same priority, check if we can do better.
-
{
- KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
- if (best_thread == GetCurrentThread()) {
- best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
+ KThread* best_thread = priority_queue.GetScheduledFront(core_id);
+ if (best_thread == GetCurrentThreadPointer(kernel)) {
+ best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
- KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id);
+ KThread* suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -348,7 +644,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
- suggested->SetActiveCore(cpu_core_id);
+ suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -356,7 +652,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
}
// Get the next suggestion.
- suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested);
+ suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
}
}
@@ -365,71 +661,13 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
SetSchedulerUpdateNeeded(kernel);
}
-bool KScheduler::CanSchedule(KernelCore& kernel) {
- return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
-}
-
-bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
-}
-
-void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
-}
-
-void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
- kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
-}
-
-void KScheduler::DisableScheduling(KernelCore& kernel) {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
- GetCurrentThreadPointer(kernel)->DisableDispatch();
-}
-
-void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
- // If we are shutting down the kernel, none of this is relevant anymore.
- if (kernel.IsShuttingDown()) {
- return;
- }
-
- auto* current_thread = GetCurrentThreadPointer(kernel);
-
- ASSERT(current_thread->GetDisableDispatchCount() >= 1);
-
- if (current_thread->GetDisableDispatchCount() > 1) {
- current_thread->EnableDispatch();
- } else {
- RescheduleCores(kernel, cores_needing_scheduling);
- }
-
- // Special case to ensure that dummy threads which are waiting actually block.
- current_thread->IfDummyThreadTryWait();
-}
-
-u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
- if (IsSchedulerUpdateNeeded(kernel)) {
- return UpdateHighestPriorityThreadsImpl(kernel);
- } else {
- return 0;
- }
-}
-
-KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
- return kernel.GlobalSchedulerContext().priority_queue;
-}
-
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -442,7 +680,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -468,7 +706,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -481,7 +719,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -501,7 +739,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
if (KThread* running_on_suggested_core =
(suggested_core >= 0)
- ? kernel.Scheduler(suggested_core).state.highest_priority_thread
+ ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread
: nullptr;
running_on_suggested_core != suggested) {
// If the current thread's priority is higher than our suggestion's we prefer
@@ -556,7 +794,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
ASSERT(kernel.CurrentProcess() != nullptr);
// Get the current thread and process.
- KThread& cur_thread = Kernel::GetCurrentThread(kernel);
+ KThread& cur_thread = GetCurrentThread(kernel);
KProcess& cur_process = *kernel.CurrentProcess();
// If the thread's yield count matches, there's nothing for us to do.
@@ -569,7 +807,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Perform the yield.
{
- KScopedSchedulerLock lock(kernel);
+ KScopedSchedulerLock sl{kernel};
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -626,219 +864,19 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
}
}
-KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} {
- switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
- state.needs_scheduling.store(true);
- state.interrupt_task_thread_runnable = false;
- state.should_count_idle = false;
- state.idle_count = 0;
- state.idle_thread_stack = nullptr;
- state.highest_priority_thread = nullptr;
-}
-
-void KScheduler::Finalize() {
- if (idle_thread) {
- idle_thread->Close();
- idle_thread = nullptr;
- }
-}
-
-KScheduler::~KScheduler() {
- ASSERT(!idle_thread);
-}
-
-KThread* KScheduler::GetCurrentThread() const {
- if (auto result = current_thread.load(); result) {
- return result;
- }
- return idle_thread;
-}
-
-u64 KScheduler::GetLastContextSwitchTicks() const {
- return last_context_switch_time;
-}
-
-void KScheduler::RescheduleCurrentCore() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
-
- auto& phys_core = system.Kernel().PhysicalCore(core_id);
- if (phys_core.IsInterrupted()) {
- phys_core.ClearInterrupt();
+void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
+ if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
+ RescheduleCores(kernel, core_mask);
}
-
- guard.Lock();
- if (state.needs_scheduling.load()) {
- Schedule();
- } else {
- GetCurrentThread()->EnableDispatch();
- guard.Unlock();
- }
-}
-
-void KScheduler::OnThreadStart() {
- SwitchContextStep2();
}
-void KScheduler::Unload(KThread* thread) {
- ASSERT(thread);
-
- LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
-
- if (thread->IsCallingSvc()) {
- thread->ClearIsCallingSvc();
- }
-
- auto& physical_core = system.Kernel().PhysicalCore(core_id);
- if (!physical_core.IsInitialized()) {
- return;
- }
-
- Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
- cpu_core.SaveContext(thread->GetContext32());
- cpu_core.SaveContext(thread->GetContext64());
- // Save the TPIDR_EL0 system register in case it was modified.
- thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
-
- if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
- prev_thread = thread;
- } else {
- prev_thread = nullptr;
- }
-
- thread->context_guard.Unlock();
-}
-
-void KScheduler::Reload(KThread* thread) {
- LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
-
- Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
- cpu_core.LoadContext(thread->GetContext32());
- cpu_core.LoadContext(thread->GetContext64());
- cpu_core.SetTlsAddress(thread->GetTLSAddress());
- cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
- cpu_core.ClearExclusiveState();
-}
-
-void KScheduler::SwitchContextStep2() {
- // Load context of new thread
- Reload(GetCurrentThread());
-
- RescheduleCurrentCore();
-}
-
-void KScheduler::ScheduleImpl() {
- KThread* previous_thread = GetCurrentThread();
- KThread* next_thread = state.highest_priority_thread;
-
- state.needs_scheduling.store(false);
-
- // We never want to schedule a null thread, so use the idle thread if we don't have a next.
- if (next_thread == nullptr) {
- next_thread = idle_thread;
- }
-
- if (next_thread->GetCurrentCore() != core_id) {
- next_thread->SetCurrentCore(core_id);
- }
-
- // We never want to schedule a dummy thread, as these are only used by host threads for locking.
- if (next_thread->GetThreadType() == ThreadType::Dummy) {
- ASSERT_MSG(false, "Dummy threads should never be scheduled!");
- next_thread = idle_thread;
- }
-
- // If we're not actually switching thread, there's nothing to do.
- if (next_thread == current_thread.load()) {
- previous_thread->EnableDispatch();
- guard.Unlock();
- return;
- }
-
- // Update the CPU time tracking variables.
- KProcess* const previous_process = system.Kernel().CurrentProcess();
- UpdateLastContextSwitchTime(previous_thread, previous_process);
-
- // Save context for previous thread
- Unload(previous_thread);
-
- std::shared_ptr<Common::Fiber>* old_context;
- old_context = &previous_thread->GetHostContext();
-
- // Set the new thread.
- current_thread.store(next_thread);
-
- guard.Unlock();
-
- Common::Fiber::YieldTo(*old_context, *switch_fiber);
- /// When a thread wakes up, the scheduler may have changed to a different one on another core.
- auto& next_scheduler = *system.Kernel().CurrentScheduler();
- next_scheduler.SwitchContextStep2();
-}
-
-void KScheduler::OnSwitch(void* this_scheduler) {
- KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
- sched->SwitchToCurrent();
-}
-
-void KScheduler::SwitchToCurrent() {
- while (true) {
- {
- KScopedSpinLock lk{guard};
- current_thread.store(state.highest_priority_thread);
- state.needs_scheduling.store(false);
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) {
+ // Send IPI
+ for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+ if (core_mask & (1ULL << i)) {
+ kernel.PhysicalCore(i).Interrupt();
}
- const auto is_switch_pending = [this] {
- KScopedSpinLock lk{guard};
- return state.needs_scheduling.load();
- };
- do {
- auto next_thread = current_thread.load();
- if (next_thread != nullptr) {
- const auto locked = next_thread->context_guard.TryLock();
- if (state.needs_scheduling.load()) {
- next_thread->context_guard.Unlock();
- break;
- }
- if (next_thread->GetActiveCore() != core_id) {
- next_thread->context_guard.Unlock();
- break;
- }
- if (!locked) {
- continue;
- }
- }
- auto thread = next_thread ? next_thread : idle_thread;
- Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
- } while (!is_switch_pending());
}
}
-void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
- const u64 prev_switch_ticks = last_context_switch_time;
- const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
- const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
-
- if (thread != nullptr) {
- thread->AddCpuTime(core_id, update_ticks);
- }
-
- if (process != nullptr) {
- process->UpdateCPUTimeTicks(update_ticks);
- }
-
- last_context_switch_time = most_recent_switch_ticks;
-}
-
-void KScheduler::Initialize() {
- idle_thread = KThread::Create(system.Kernel());
- ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
- idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
-}
-
-KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
- : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
-
-KScopedSchedulerLock::~KScopedSchedulerLock() = default;
-
} // namespace Kernel
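The heart of ScheduleImplFiber is an optimistic switch loop: clear needs_scheduling, pick the highest-priority candidate, try to take its context lock, and start over whenever another core re-flags scheduling mid-switch. A standalone sketch of that retry loop using plain standard-library types (Thread, PickHighestPriority, and the mutex guard are stand-ins for the kernel's types):

#include <atomic>
#include <mutex>

struct Thread {
    std::mutex context_guard;
};

std::atomic<bool> needs_scheduling{true};
Thread g_idle;

Thread* PickHighestPriority() {
    return &g_idle; // stand-in for m_state.highest_priority_thread
}

// Returns the next thread with its context lock held.
Thread* AcquireNextThread() {
    for (;;) {
        needs_scheduling.store(false, std::memory_order_seq_cst);
        Thread* next = PickHighestPriority();

        // Spin on the context lock; if another core requests scheduling
        // while we wait, abandon this candidate and rescan.
        bool locked = false;
        while (!(locked = next->context_guard.try_lock())) {
            if (needs_scheduling.load(std::memory_order_seq_cst)) {
                break;
            }
        }
        if (!locked) {
            continue;
        }

        // Even after locking, a late scheduling request invalidates the switch.
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            next->context_guard.unlock();
            continue;
        }
        return next;
    }
}

int main() {
    AcquireNextThread()->context_guard.unlock();
}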
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 82fcd99e7..534321d8d 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -12,6 +11,7 @@
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/k_scoped_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/k_thread.h"
namespace Common {
class Fiber;
@@ -24,188 +24,150 @@ class System;
namespace Kernel {
class KernelCore;
+class KInterruptTaskManager;
class KProcess;
-class SchedulerLock;
class KThread;
+class KScopedDisableDispatch;
+class KScopedSchedulerLock;
+class KScopedSchedulerLockAndSleep;
class KScheduler final {
public:
- explicit KScheduler(Core::System& system_, s32 core_id_);
- ~KScheduler();
-
- void Finalize();
+ YUZU_NON_COPYABLE(KScheduler);
+ YUZU_NON_MOVEABLE(KScheduler);
- /// Reschedules to the next available thread (call after current thread is suspended)
- void RescheduleCurrentCore();
+ using LockType = KAbstractSchedulerLock<KScheduler>;
- /// Reschedules cores pending reschedule, to be called on EnableScheduling.
- static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
+ explicit KScheduler(KernelCore& kernel);
+ ~KScheduler();
- /// The next two are for SingleCore Only.
- /// Unload current thread before preempting core.
+ void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
+ void Activate();
+ void OnThreadStart();
void Unload(KThread* thread);
-
- /// Reload current thread after core preemption.
void Reload(KThread* thread);
- /// Gets the current running thread
- [[nodiscard]] KThread* GetCurrentThread() const;
+ void SetInterruptTaskRunnable();
+ void RequestScheduleOnInterrupt();
+ void PreemptSingleCore();
- /// Gets the idle thread
- [[nodiscard]] KThread* GetIdleThread() const {
- return idle_thread;
+ u64 GetIdleCount() {
+ return m_state.idle_count;
}
- /// Returns true if the scheduler is idle
- [[nodiscard]] bool IsIdle() const {
- return GetCurrentThread() == idle_thread;
+ KThread* GetIdleThread() const {
+ return m_idle_thread;
}
- /// Gets the timestamp for the last context switch in ticks.
- [[nodiscard]] u64 GetLastContextSwitchTicks() const;
-
- [[nodiscard]] bool ContextSwitchPending() const {
- return state.needs_scheduling.load(std::memory_order_relaxed);
+ bool IsIdle() const {
+ return m_current_thread.load() == m_idle_thread;
}
- void Initialize();
-
- void OnThreadStart();
+ KThread* GetPreviousThread() const {
+ return m_state.prev_thread;
+ }
- [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
- return switch_fiber;
+ KThread* GetSchedulerCurrentThread() const {
+ return m_current_thread.load();
}
- [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
- return switch_fiber;
+ s64 GetLastContextSwitchTime() const {
+ return m_last_context_switch_time;
}
- [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
+ // Static public API.
+ static bool CanSchedule(KernelCore& kernel) {
+ return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
+ }
+ static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
+ }
- /**
- * Takes a thread and moves it to the back of its priority list.
- *
- * @note This operation can be redundant and no scheduling is changed if marked as so.
- */
- static void YieldWithoutCoreMigration(KernelCore& kernel);
+ static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().scheduler_update_needed;
+ }
+ static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed = true;
+ }
+ static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+ kernel.GlobalSchedulerContext().scheduler_update_needed = false;
+ }
- /**
- * Takes a thread and moves it to the back of its priority list.
- * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
- * a better priority than the next thread in the core.
- *
- * @note This operation can be redundant and no scheduling is changed if marked as so.
- */
- static void YieldWithCoreMigration(KernelCore& kernel);
+ static void DisableScheduling(KernelCore& kernel);
+ static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
- /**
- * Takes a thread and moves it out of the scheduling queue.
- * and into the suggested queue. If no thread can be scheduled afterwards in that core,
- * a suggested thread is obtained instead.
- *
- * @note This operation can be redundant and no scheduling is changed if marked as so.
- */
- static void YieldToAnyThread(KernelCore& kernel);
+ static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
- /// Notify the scheduler a thread's status has changed.
static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
-
- /// Notify the scheduler a thread's priority has changed.
static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
-
- /// Notify the scheduler a thread's core and/or affinity mask has changed.
static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);
- static bool CanSchedule(KernelCore& kernel);
- static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
- static void SetSchedulerUpdateNeeded(KernelCore& kernel);
- static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
- static void DisableScheduling(KernelCore& kernel);
- static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
- [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+ static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority);
+ static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling);
+
+ static void YieldWithoutCoreMigration(KernelCore& kernel);
+ static void YieldWithCoreMigration(KernelCore& kernel);
+ static void YieldToAnyThread(KernelCore& kernel);
private:
- friend class GlobalSchedulerContext;
-
- /**
- * Takes care of selecting the new scheduled threads in three steps:
- *
- * 1. First a thread is selected from the top of the priority queue. If no thread
- * is obtained then we move to step two, else we are done.
- *
- * 2. Second we try to get a suggested thread that's not assigned to any core or
- * that is not the top thread in that core.
- *
- * 3. Third, if no suggested thread is found, we do a second pass and pick a running
- * thread in another core and swap it with its current thread.
- *
- * returns the cores needing scheduling.
- */
- [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
-
- [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
-
- void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
-
- void Schedule() {
- ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
- this->ScheduleImpl();
+ // Static private API.
+ static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
+ return kernel.GlobalSchedulerContext().priority_queue;
}
+ static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
- /// Switches the CPU's active thread context to that of the specified thread
- void ScheduleImpl();
-
- /// When a thread wakes up, it must run this through its new scheduler
- void SwitchContextStep2();
+ static void RescheduleCurrentHLEThread(KernelCore& kernel);
- /**
- * Called on every context switch to update the internal timestamp
- * This also updates the running time ticks for the given thread and
- * process using the following difference:
- *
- * ticks += most_recent_ticks - last_context_switch_ticks
- *
- * The internal tick timestamp for the scheduler is simply the
- * most recent tick count retrieved. No special arithmetic is
- * applied to it.
- */
- void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
+ // Instanced private API.
+ void ScheduleImpl();
+ void ScheduleImplFiber();
+ void SwitchThread(KThread* next_thread);
- static void OnSwitch(void* this_scheduler);
- void SwitchToCurrent();
+ void Schedule();
+ void ScheduleOnInterrupt();
- KThread* prev_thread{};
- std::atomic<KThread*> current_thread{};
+ void RescheduleOtherCores(u64 cores_needing_scheduling);
+ void RescheduleCurrentCore();
+ void RescheduleCurrentCoreImpl();
- KThread* idle_thread{};
+ u64 UpdateHighestPriorityThread(KThread* thread);
- std::shared_ptr<Common::Fiber> switch_fiber{};
+private:
+ friend class KScopedDisableDispatch;
struct SchedulingState {
- std::atomic<bool> needs_scheduling{};
- bool interrupt_task_thread_runnable{};
- bool should_count_idle{};
- u64 idle_count{};
- KThread* highest_priority_thread{};
- void* idle_thread_stack{};
+ std::atomic<bool> needs_scheduling{false};
+ bool interrupt_task_runnable{false};
+ bool should_count_idle{false};
+ u64 idle_count{0};
+ KThread* highest_priority_thread{nullptr};
+ void* idle_thread_stack{nullptr};
+ std::atomic<KThread*> prev_thread{nullptr};
+ KInterruptTaskManager* interrupt_task_manager{nullptr};
};
- SchedulingState state;
-
- Core::System& system;
- u64 last_context_switch_time{};
- const s32 core_id;
-
- KSpinLock guard{};
+ KernelCore& kernel;
+ SchedulingState m_state;
+ bool m_is_active{false};
+ s32 m_core_id{0};
+ s64 m_last_context_switch_time{0};
+ KThread* m_idle_thread{nullptr};
+ std::atomic<KThread*> m_current_thread{nullptr};
+
+ std::shared_ptr<Common::Fiber> m_switch_fiber{};
+ KThread* m_switch_cur_thread{};
+ KThread* m_switch_highest_priority_thread{};
+ bool m_switch_from_schedule{};
};
-class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
+class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
public:
- explicit KScopedSchedulerLock(KernelCore& kernel);
- ~KScopedSchedulerLock();
+ explicit KScopedSchedulerLock(KernelCore& kernel)
+ : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
+ ~KScopedSchedulerLock() = default;
};
} // namespace Kernel
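
For context on the slimmed-down lock wrapper above: a minimal usage sketch, assuming a KernelCore reference is available at the call site. The SignalExample function and its body are illustrative, not part of this change.

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"

namespace Kernel {

// Illustrative call site only; thread state transitions must be
// performed while the global scheduler lock is held.
void SignalExample(KernelCore& kernel, KThread* thread) {
    // Acquires kernel.GlobalSchedulerContext().scheduler_lock for this scope.
    KScopedSchedulerLock sl{kernel};

    // Safe under the scheduler lock; wakes the thread with a success result.
    thread->EndWait(ResultSuccess);
} // Lock released here by ~KScopedSchedulerLock.

} // namespace Kernel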
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 93c47f1b1..73314b45e 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -1,13 +1,15 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
+#include <atomic>
#include "common/assert.h"
+#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
namespace Kernel {
@@ -75,7 +77,7 @@ private:
KernelCore& kernel;
KAlignedSpinLock spin_lock{};
s32 lock_count{};
- KThread* owner_thread{};
+ std::atomic<KThread*> owner_thread{};
};
} // namespace Kernel
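
The owner_thread field becoming std::atomic<KThread*> matters because other cores may inspect lock ownership without holding spin_lock. A minimal sketch of such a reader, assuming the owner is published with release semantics on acquire; the helper name is hypothetical.

#include <atomic>

#include "core/hle/kernel/k_thread.h"

namespace Kernel {

// Hypothetical helper: race-free ownership check from any core.
// With a plain KThread* this concurrent read would be a data race.
bool IsLockedByCurrentThread(KernelCore& kernel,
                             const std::atomic<KThread*>& owner_thread) {
    return owner_thread.load(std::memory_order_acquire) ==
           GetCurrentThreadPointer(kernel);
}

} // namespace Kernel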
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
index 89a7ffe49..857e21156 100644
--- a/src/core/hle/kernel/k_scoped_lock.h
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -1,9 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h
index 07272075d..436bcf9fe 100644
--- a/src/core/hle/kernel/k_scoped_resource_reservation.h
+++ b/src/core/hle/kernel/k_scoped_resource_reservation.h
@@ -1,9 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 2995c492d..76c095e69 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -1,9 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index 433fc98e1..e968f26ad 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <tuple>
#include "common/assert.h"
@@ -62,6 +61,12 @@ void KServerPort::Destroy() {
// Close our reference to our parent.
parent->Close();
+
+ // Release host emulation members.
+ session_handler.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
bool KServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
index 6302d5e61..fd4f4bd20 100644
--- a/src/core/hle/kernel/k_server_port.h
+++ b/src/core/hle/kernel/k_server_port.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -30,11 +29,11 @@ public:
/// Whether or not this server port has an HLE handler available.
bool HasSessionRequestHandler() const {
- return session_handler != nullptr;
+ return !session_handler.expired();
}
/// Gets the HLE handler for this port.
- SessionRequestHandlerPtr GetSessionRequestHandler() const {
+ SessionRequestHandlerWeakPtr GetSessionRequestHandler() const {
return session_handler;
}
@@ -42,7 +41,7 @@ public:
* Sets the HLE handler template for the port. ServerSessions created by connecting to this port
* will inherit a reference to this handler.
*/
- void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
+ void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) {
session_handler = std::move(handler);
}
@@ -66,7 +65,7 @@ private:
void CleanupSessions();
SessionList session_list;
- SessionRequestHandlerPtr session_handler;
+ SessionRequestHandlerWeakPtr session_handler;
KPort* parent{};
};
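
Because the port now holds only a weak reference to its HLE handler, callers must lock it and handle expiry. A sketch of the expected pattern; HasLiveHandler is an illustrative name, not part of this change.

#include "core/hle/kernel/k_server_port.h"

namespace Kernel {

// Illustrative: promote the weak handler to a strong reference before use.
bool HasLiveHandler(KServerPort& port) {
    if (auto handler = port.GetSessionRequestHandler().lock()) {
        // Handler kept alive for the duration of this scope; safe to call.
        return true;
    }
    // The owning manager released the handler; drop the request.
    return false;
}

} // namespace Kernel

This mirrors the lock-and-check pattern the diff introduces in KServerSession::HandleDomainSyncRequest below.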
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 4d94eb9cf..802c646a6 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <tuple>
#include <utility>
@@ -27,10 +26,7 @@ namespace Kernel {
KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
-KServerSession::~KServerSession() {
- // Ensure that the global list tracking server sessions does not hold on to a reference.
- kernel.UnregisterServerSession(this);
-}
+KServerSession::~KServerSession() = default;
void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
std::shared_ptr<SessionRequestManager> manager_) {
@@ -49,6 +45,12 @@ void KServerSession::Destroy() {
parent->OnServerClosed();
parent->Close();
+
+ // Release host emulation members.
+ manager.reset();
+
+ // Ensure that the global list tracking server objects does not hold on to a reference.
+ kernel.UnregisterServerObject(this);
}
void KServerSession::OnClientClosed() {
@@ -77,7 +79,7 @@ std::size_t KServerSession::NumDomainRequestHandlers() const {
return manager->DomainHandlerCount();
}
-ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
+Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}
@@ -95,10 +97,15 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
"object_id {} is too big! This probably means a recent service call "
"to {} needed to return a new interface!",
object_id, name);
- UNREACHABLE();
+ ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
- return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context);
+ if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
+ return strong_ptr->HandleSyncRequest(*this, context);
+ } else {
+ ASSERT(false);
+ return ResultSuccess;
+ }
case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
@@ -116,7 +123,7 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
return ResultSuccess;
}
-ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
+Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread);
@@ -136,8 +143,8 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
return ResultSuccess;
}
-ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
- ResultCode result = ResultSuccess;
+Result KServerSession::CompleteSyncRequest(HLERequestContext& context) {
+ Result result = ResultSuccess;
// If the session has been converted to a domain, handle the domain request
if (manager->HasSessionRequestHandler(context)) {
@@ -166,8 +173,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
return result;
}
-ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing) {
+Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing) {
return QueueSyncRequest(thread, memory);
}
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 5b76bf17c..6d0821945 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -74,10 +73,10 @@ public:
* @param memory Memory context to handle the sync request under.
* @param core_timing Core timing context to schedule the request event under.
*
- * @returns ResultCode from the operation.
+ * @returns Result from the operation.
*/
- ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
- Core::Timing::CoreTiming& core_timing);
+ Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
+ Core::Timing::CoreTiming& core_timing);
/// Adds a new domain request handler to the collection of request handlers within
/// this ServerSession instance.
@@ -104,14 +103,14 @@ public:
private:
/// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
+ Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
/// Completes a sync request from the emulated application.
- ResultCode CompleteSyncRequest(HLERequestContext& context);
+ Result CompleteSyncRequest(HLERequestContext& context);
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
/// object handle.
- ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context);
+ Result HandleDomainSyncRequest(Kernel::HLERequestContext& context);
/// This session's HLE request handlers
std::shared_ptr<SessionRequestManager> manager;
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
index a64b56b9e..ee05aa282 100644
--- a/src/core/hle/kernel/k_session.cpp
+++ b/src/core/hle/kernel/k_session.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h
index 62c328a68..c6ead403b 100644
--- a/src/core/hle/kernel/k_session.h
+++ b/src/core/hle/kernel/k_session.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index 51d7538ca..8ff1545b6 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/core.h"
@@ -18,12 +17,10 @@ KSharedMemory::~KSharedMemory() {
kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size);
}
-ResultCode KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_,
- Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_,
- PAddr physical_address_, std::size_t size_,
- std::string name_) {
+Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_) {
// Set members.
owner_process = owner_process_;
device_memory = &device_memory_;
@@ -67,8 +64,8 @@ void KSharedMemory::Finalize() {
KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize();
}
-ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions) {
+Result KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions) {
const u64 page_count{(map_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
@@ -86,7 +83,7 @@ ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size
ConvertToKMemoryPermission(permissions));
}
-ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
+Result KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size) {
const u64 page_count{(unmap_size + PageSize - 1) / PageSize};
if (page_list.GetNumPages() != page_count) {
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 81de36136..34cb98456 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -9,7 +8,7 @@
#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"
@@ -26,10 +25,10 @@ public:
explicit KSharedMemory(KernelCore& kernel_);
~KSharedMemory() override;
- ResultCode Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
- KPageLinkedList&& page_list_, Svc::MemoryPermission owner_permission_,
- Svc::MemoryPermission user_permission_, PAddr physical_address_,
- std::size_t size_, std::string name_);
+ Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
+ KPageGroup&& page_list_, Svc::MemoryPermission owner_permission_,
+ Svc::MemoryPermission user_permission_, PAddr physical_address_,
+ std::size_t size_, std::string name_);
/**
* Maps a shared memory block to an address in the target process' address space
@@ -38,8 +37,8 @@ public:
* @param map_size Size of the shared memory block to map
* @param permissions Memory block map permissions (specified by SVC field)
*/
- ResultCode Map(KProcess& target_process, VAddr address, std::size_t map_size,
- Svc::MemoryPermission permissions);
+ Result Map(KProcess& target_process, VAddr address, std::size_t map_size,
+ Svc::MemoryPermission permissions);
/**
* Unmaps a shared memory block from an address in the target process' address space
@@ -47,7 +46,7 @@ public:
* @param address Address in system memory to unmap shared memory block
* @param unmap_size Size of the shared memory block to unmap
*/
- ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
+ Result Unmap(KProcess& target_process, VAddr address, std::size_t unmap_size);
/**
* Gets a pointer to the shared memory block
@@ -77,7 +76,7 @@ public:
private:
Core::DeviceMemory* device_memory;
KProcess* owner_process{};
- KPageLinkedList page_list;
+ KPageGroup page_list;
Svc::MemoryPermission owner_permission{};
Svc::MemoryPermission user_permission{};
PAddr physical_address{};
diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h
index 20bc19f46..e43db8515 100644
--- a/src/core/hle/kernel/k_shared_memory_info.h
+++ b/src/core/hle/kernel/k_shared_memory_info.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 05c0bec9c..2b303537e 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -16,39 +15,34 @@ class KernelCore;
namespace impl {
-class KSlabHeapImpl final {
-public:
+class KSlabHeapImpl {
YUZU_NON_COPYABLE(KSlabHeapImpl);
YUZU_NON_MOVEABLE(KSlabHeapImpl);
+public:
struct Node {
Node* next{};
};
+public:
constexpr KSlabHeapImpl() = default;
- constexpr ~KSlabHeapImpl() = default;
- void Initialize(std::size_t size) {
- ASSERT(head == nullptr);
- obj_size = size;
- }
-
- constexpr std::size_t GetObjectSize() const {
- return obj_size;
+ void Initialize() {
+ ASSERT(m_head == nullptr);
}
Node* GetHead() const {
- return head;
+ return m_head;
}
void* Allocate() {
- Node* ret = head.load();
+ Node* ret = m_head.load();
do {
if (ret == nullptr) {
break;
}
- } while (!head.compare_exchange_weak(ret, ret->next));
+ } while (!m_head.compare_exchange_weak(ret, ret->next));
return ret;
}
@@ -56,170 +50,157 @@ public:
void Free(void* obj) {
Node* node = static_cast<Node*>(obj);
- Node* cur_head = head.load();
+ Node* cur_head = m_head.load();
do {
node->next = cur_head;
- } while (!head.compare_exchange_weak(cur_head, node));
+ } while (!m_head.compare_exchange_weak(cur_head, node));
}
private:
- std::atomic<Node*> head{};
- std::size_t obj_size{};
+ std::atomic<Node*> m_head{};
};
} // namespace impl
-class KSlabHeapBase {
-public:
+template <bool SupportDynamicExpansion>
+class KSlabHeapBase : protected impl::KSlabHeapImpl {
YUZU_NON_COPYABLE(KSlabHeapBase);
YUZU_NON_MOVEABLE(KSlabHeapBase);
- constexpr KSlabHeapBase() = default;
- constexpr ~KSlabHeapBase() = default;
+private:
+ size_t m_obj_size{};
+ uintptr_t m_peak{};
+ uintptr_t m_start{};
+ uintptr_t m_end{};
- constexpr bool Contains(uintptr_t addr) const {
- return start <= addr && addr < end;
- }
+private:
+ void UpdatePeakImpl(uintptr_t obj) {
+ static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
+ std::atomic_ref<uintptr_t> peak_ref(m_peak);
- constexpr std::size_t GetSlabHeapSize() const {
- return (end - start) / GetObjectSize();
+ const uintptr_t alloc_peak = obj + this->GetObjectSize();
+ uintptr_t cur_peak = m_peak;
+ do {
+ if (alloc_peak <= cur_peak) {
+ break;
+ }
+ } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
}
- constexpr std::size_t GetObjectSize() const {
- return impl.GetObjectSize();
- }
+public:
+ constexpr KSlabHeapBase() = default;
- constexpr uintptr_t GetSlabHeapAddress() const {
- return start;
+ bool Contains(uintptr_t address) const {
+ return m_start <= address && address < m_end;
}
- std::size_t GetObjectIndexImpl(const void* obj) const {
- return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize();
+ void Initialize(size_t obj_size, void* memory, size_t memory_size) {
+ // Ensure we don't initialize a slab using null memory.
+ ASSERT(memory != nullptr);
+
+ // Set our object size.
+ m_obj_size = obj_size;
+
+ // Initialize the base allocator.
+ KSlabHeapImpl::Initialize();
+
+ // Set our tracking variables.
+ const size_t num_obj = (memory_size / obj_size);
+ m_start = reinterpret_cast<uintptr_t>(memory);
+ m_end = m_start + num_obj * obj_size;
+ m_peak = m_start;
+
+ // Free the objects.
+ u8* cur = reinterpret_cast<u8*>(m_end);
+
+ for (size_t i = 0; i < num_obj; i++) {
+ cur -= obj_size;
+ KSlabHeapImpl::Free(cur);
+ }
}
- std::size_t GetPeakIndex() const {
- return GetObjectIndexImpl(reinterpret_cast<const void*>(peak));
+ size_t GetSlabHeapSize() const {
+ return (m_end - m_start) / this->GetObjectSize();
}
- void* AllocateImpl() {
- return impl.Allocate();
+ size_t GetObjectSize() const {
+ return m_obj_size;
}
- void FreeImpl(void* obj) {
- // Don't allow freeing an object that wasn't allocated from this heap
- ASSERT(Contains(reinterpret_cast<uintptr_t>(obj)));
+ void* Allocate() {
+ void* obj = KSlabHeapImpl::Allocate();
- impl.Free(obj);
+ return obj;
}
- void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
- // Ensure we don't initialize a slab using null memory
- ASSERT(memory != nullptr);
-
- // Initialize the base allocator
- impl.Initialize(obj_size);
+ void Free(void* obj) {
+ // Don't allow freeing an object that wasn't allocated from this heap.
+ const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
+ ASSERT(contained);
+ KSlabHeapImpl::Free(obj);
+ }
- // Set our tracking variables
- const std::size_t num_obj = (memory_size / obj_size);
- start = reinterpret_cast<uintptr_t>(memory);
- end = start + num_obj * obj_size;
- peak = start;
+ size_t GetObjectIndex(const void* obj) const {
+ if constexpr (SupportDynamicExpansion) {
+ if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
+ return std::numeric_limits<size_t>::max();
+ }
+ }
- // Free the objects
- u8* cur = reinterpret_cast<u8*>(end);
+ return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
+ }
- for (std::size_t i{}; i < num_obj; i++) {
- cur -= obj_size;
- impl.Free(cur);
- }
+ size_t GetPeakIndex() const {
+ return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak));
}
-private:
- using Impl = impl::KSlabHeapImpl;
+ uintptr_t GetSlabHeapAddress() const {
+ return m_start;
+ }
- Impl impl;
- uintptr_t peak{};
- uintptr_t start{};
- uintptr_t end{};
+ size_t GetNumRemaining() const {
+ // Only calculate the number of remaining objects under debug configuration.
+ return 0;
+ }
};
template <typename T>
-class KSlabHeap final : public KSlabHeapBase {
-public:
- enum class AllocationType {
- Host,
- Guest,
- };
+class KSlabHeap final : public KSlabHeapBase<false> {
+private:
+ using BaseHeap = KSlabHeapBase<false>;
- explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host)
- : KSlabHeapBase(), allocation_type{allocation_type_} {}
+public:
+ constexpr KSlabHeap() = default;
- void Initialize(void* memory, std::size_t memory_size) {
- if (allocation_type == AllocationType::Guest) {
- InitializeImpl(sizeof(T), memory, memory_size);
- }
+ void Initialize(void* memory, size_t memory_size) {
+ BaseHeap::Initialize(sizeof(T), memory, memory_size);
}
T* Allocate() {
- switch (allocation_type) {
- case AllocationType::Host:
- // Fallback for cases where we do not yet support allocating guest memory from the slab
- // heap, such as for kernel memory regions.
- return new T;
-
- case AllocationType::Guest:
- T* obj = static_cast<T*>(AllocateImpl());
- if (obj != nullptr) {
- new (obj) T();
- }
- return obj;
- }
+ T* obj = static_cast<T*>(BaseHeap::Allocate());
- UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
- return nullptr;
+ if (obj != nullptr) [[likely]] {
+ std::construct_at(obj);
+ }
+ return obj;
}
- T* AllocateWithKernel(KernelCore& kernel) {
- switch (allocation_type) {
- case AllocationType::Host:
- // Fallback for cases where we do not yet support allocating guest memory from the slab
- // heap, such as for kernel memory regions.
- return new T(kernel);
+ T* Allocate(KernelCore& kernel) {
+ T* obj = static_cast<T*>(BaseHeap::Allocate());
- case AllocationType::Guest:
- T* obj = static_cast<T*>(AllocateImpl());
- if (obj != nullptr) {
- new (obj) T(kernel);
- }
- return obj;
+ if (obj != nullptr) [[likely]] {
+ std::construct_at(obj, kernel);
}
-
- UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
- return nullptr;
+ return obj;
}
void Free(T* obj) {
- switch (allocation_type) {
- case AllocationType::Host:
- // Fallback for cases where we do not yet support allocating guest memory from the slab
- // heap, such as for kernel memory regions.
- delete obj;
- return;
-
- case AllocationType::Guest:
- FreeImpl(obj);
- return;
- }
-
- UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type);
+ BaseHeap::Free(obj);
}
- constexpr std::size_t GetObjectIndex(const T* obj) const {
- return GetObjectIndexImpl(obj);
+ size_t GetObjectIndex(const T* obj) const {
+ return BaseHeap::GetObjectIndex(obj);
}
-
-private:
- const AllocationType allocation_type;
};
} // namespace Kernel
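
A minimal usage sketch of the reworked slab heap, assuming a hypothetical Foo type and static backing storage; in the kernel proper, the backing memory comes from the reserved slab region rather than a buffer like this.

#include "core/hle/kernel/k_slab_heap.h"

namespace Kernel {

struct Foo {
    u32 value{};
};

void SlabHeapExample() {
    // Illustrative backing store for 16 objects.
    constexpr size_t NumObjects = 16;
    alignas(Foo) static u8 backing[sizeof(Foo) * NumObjects];

    KSlabHeap<Foo> heap;
    heap.Initialize(backing, sizeof(backing));

    // Allocate() pops the free list and constructs in place via
    // std::construct_at; it returns nullptr when the heap is exhausted.
    if (Foo* obj = heap.Allocate()) {
        obj->value = 42;
        heap.Free(obj);
    }
}

} // namespace Kernel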
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 4412aa4bb..6e16a1849 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -1,54 +1,20 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_spin_lock.h"
-#if _MSC_VER
-#include <intrin.h>
-#if _M_AMD64
-#define __x86_64__ 1
-#endif
-#if _M_ARM64
-#define __aarch64__ 1
-#endif
-#else
-#if __x86_64__
-#include <xmmintrin.h>
-#endif
-#endif
-
-namespace {
-
-void ThreadPause() {
-#if __x86_64__
- _mm_pause();
-#elif __aarch64__ && _MSC_VER
- __yield();
-#elif __aarch64__
- asm("yield");
-#endif
-}
-
-} // namespace
-
namespace Kernel {
void KSpinLock::Lock() {
- while (lck.test_and_set(std::memory_order_acquire)) {
- ThreadPause();
- }
+ lck.lock();
}
void KSpinLock::Unlock() {
- lck.clear(std::memory_order_release);
+ lck.unlock();
}
bool KSpinLock::TryLock() {
- if (lck.test_and_set(std::memory_order_acquire)) {
- return false;
- }
- return true;
+ return lck.try_lock();
}
} // namespace Kernel
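
With KSpinLock now delegating to std::mutex, the public contract is unchanged while contended waiters block instead of busy-spinning, which is presumably friendlier to the host OS scheduler. A small illustrative caller of the non-blocking path:

#include "core/hle/kernel/k_spin_lock.h"

namespace Kernel {

// Illustrative only; the interface is identical before and after this change.
void TryLockExample(KSpinLock& lock) {
    if (lock.TryLock()) {
        // Critical section.
        lock.Unlock();
    }
    // On failure the caller may retry, back off, or fall back to Lock().
}

} // namespace Kernel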
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 4d87d006a..397a93d21 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -1,10 +1,9 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <atomic>
+#include <mutex>
#include "core/hle/kernel/k_scoped_lock.h"
@@ -25,7 +24,7 @@ public:
[[nodiscard]] bool TryLock();
private:
- std::atomic_flag lck = ATOMIC_FLAG_INIT;
+ std::mutex lck;
};
// TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index e4c5eb74f..802dca046 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "common/common_types.h"
@@ -23,7 +22,7 @@ public:
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result) override {
+ Result wait_result) override {
// Determine the sync index, and unlink all nodes.
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
@@ -46,8 +45,7 @@ public:
KThreadQueue::EndWait(waiting_thread, wait_result);
}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove all nodes from our list.
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
@@ -73,9 +71,9 @@ void KSynchronizationObject::Finalize() {
KAutoObject::Finalize();
}
-ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout) {
+Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout) {
// Allocate space for the thread nodes (a host-side std::vector, not guest stack).
std::vector<ThreadListNode> thread_nodes(num_objects);
@@ -149,7 +147,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
KSynchronizationObject::~KSynchronizationObject() = default;
-void KSynchronizationObject::NotifyAvailable(ResultCode result) {
+void KSynchronizationObject::NotifyAvailable(Result result) {
KScopedSchedulerLock sl(kernel);
// If we're not signaled, we've nothing to notify.
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index ec235437b..8d8122ab7 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -25,9 +24,9 @@ public:
KThread* thread{};
};
- [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
- KSynchronizationObject** objects, const s32 num_objects,
- s64 timeout);
+ [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
+ KSynchronizationObject** objects, const s32 num_objects,
+ s64 timeout);
void Finalize() override;
@@ -73,7 +72,7 @@ protected:
virtual void OnFinalizeSynchronizationObject() {}
- void NotifyAvailable(ResultCode result);
+ void NotifyAvailable(Result result);
void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess);
}
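
For reference, a sketch of calling the renamed static Wait with the signature declared above; the two-object setup is illustrative, and the negative timeout follows the convention of waiting indefinitely.

#include "core/hle/kernel/k_synchronization_object.h"

namespace Kernel {

// Illustrative: wait on two objects, recovering the signaled index.
Result WaitExample(KernelCore& kernel, KSynchronizationObject* a,
                   KSynchronizationObject* b) {
    KSynchronizationObject* objects[]{a, b};
    s32 index = -1;
    return KSynchronizationObject::Wait(kernel, &index, objects, 2, -1);
}

} // namespace Kernel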
diff --git a/src/core/hle/kernel/k_system_control.h b/src/core/hle/kernel/k_system_control.h
index d755082c2..b8292e9d0 100644
--- a/src/core/hle/kernel/k_system_control.h
+++ b/src/core/hle/kernel/k_system_control.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index de3ffe0c7..174afc80d 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <atomic>
@@ -14,9 +13,7 @@
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
-#include "common/scope_exit.h"
#include "common/settings.h"
-#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
@@ -33,7 +30,6 @@
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
@@ -84,8 +80,7 @@ public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {}
- void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) override {
+ void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list.
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
@@ -103,8 +98,8 @@ KThread::KThread(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
KThread::~KThread() = default;
-ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner, ThreadType type) {
+Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
@@ -137,7 +132,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNIMPLEMENTED();
break;
default:
- UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
+ ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
thread_type = type;
@@ -202,6 +197,10 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
resource_limit_release_hint = false;
cpu_time = 0;
+ // Set debug context.
+ stack_top = user_stack_top;
+ argument = arg;
+
// Clear our stack parameters.
std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
sizeof(StackParameters));
@@ -210,7 +209,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
if (owner != nullptr) {
// Setup the TLS, if needed.
if (type == ThreadType::User) {
- tls_address = owner->CreateTLSRegion();
+ R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
}
parent = owner;
@@ -225,7 +224,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
// Setup the stack parameters.
StackParameters& sp = GetStackParameters();
sp.cur_thread = this;
- sp.disable_count = 0;
+ sp.disable_count = 1;
SetInExceptionHandler();
// Set thread ID.
@@ -245,46 +244,51 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
return ResultSuccess;
}
-ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
- ThreadType type, std::function<void(void*)>&& init_func,
- void* init_func_parameter) {
+Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
+ ThreadType type, std::function<void()>&& init_func) {
// Initialize the thread.
R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
// Initialize emulation parameters.
- thread->host_context =
- std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+ thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
thread->is_single_core = !Settings::values.use_multi_core.GetValue();
return ResultSuccess;
}
-ResultCode KThread::InitializeDummyThread(KThread* thread) {
- return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
+Result KThread::InitializeDummyThread(KThread* thread) {
+ // Initialize the thread.
+ R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy));
+
+ // Initialize emulation parameters.
+ thread->stack_parameters.disable_count = 0;
+
+ return ResultSuccess;
+}
+
+Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
+ return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+ system.GetCpuManager().GetGuestActivateFunc());
}
-ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
+Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
- Core::CpuManager::GetIdleThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParamater());
+ system.GetCpuManager().GetIdleThreadStartFunc());
}
-ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- s32 virt_core) {
+Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg, s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
- Core::CpuManager::GetSuspendThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParamater());
+ system.GetCpuManager().GetShutdownThreadStartFunc());
}
-ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner) {
+Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
- ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
- system.GetCpuManager().GetStartFuncParamater());
+ ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -305,7 +309,7 @@ void KThread::Finalize() {
// If the thread has a local region, delete it.
if (tls_address != 0) {
- parent->FreeTLSRegion(tls_address);
+ ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
}
// Release any waiters.
@@ -315,17 +319,26 @@ void KThread::Finalize() {
auto it = waiter_list.begin();
while (it != waiter_list.end()) {
- // Clear the lock owner
- it->SetLockOwner(nullptr);
+ // Get the thread.
+ KThread* const waiter = std::addressof(*it);
+
+ // The thread shouldn't be a kernel waiter.
+ ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
+
+ // Clear the lock owner.
+ waiter->SetLockOwner(nullptr);
// Erase the waiter from our list.
it = waiter_list.erase(it);
// Cancel the thread's wait.
- it->CancelWait(ResultInvalidState, true);
+ waiter->CancelWait(ResultInvalidState, true);
}
}
+ // Release host emulation members.
+ host_context.reset();
+
// Perform inherited finalization.
KSynchronizationObject::Finalize();
}
@@ -379,7 +392,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{};
do {
- core_thread = kernel.Scheduler(i).GetCurrentThread();
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@@ -484,9 +497,7 @@ void KThread::Unpin() {
// Resume any threads that began waiting on us while we were pinned.
for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
- if (it->GetState() == ThreadState::Waiting) {
- it->SetState(ThreadState::Runnable);
- }
+ it->EndWait(ResultSuccess);
}
}
@@ -520,7 +531,7 @@ void KThread::ClearInterruptFlag() {
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
}
-ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
// Get the virtual mask.
@@ -530,7 +541,7 @@ ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
return ResultSuccess;
}
-ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
+Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel};
ASSERT(num_core_migration_disables >= 0);
@@ -546,7 +557,7 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
return ResultSuccess;
}
-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
ASSERT(parent != nullptr);
ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(activity_pause_lock);
@@ -628,7 +639,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) {
- if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
+ if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -723,10 +734,10 @@ void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state.
- const auto old_state = thread_state;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
const auto new_state =
static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
- thread_state = new_state;
+ thread_state.store(new_state, std::memory_order_relaxed);
// Note the state change in scheduler.
if (new_state != old_state) {
@@ -738,14 +749,27 @@ void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state.
- const auto old_state = thread_state;
- thread_state = old_state & ThreadState::Mask;
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler.
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
-ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
+void KThread::WaitUntilSuspended() {
+ // Make sure we have a suspend requested.
+ ASSERT(IsSuspendRequested());
+
+ // Loop until the thread is not executing on any core.
+ for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
+ KThread* core_thread{};
+ do {
+ core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
+ } while (core_thread == this);
+ }
+}
+
+Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Lock ourselves.
KScopedLightLock lk(activity_pause_lock);
@@ -806,7 +830,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
// Check if the thread is currently running.
// If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (kernel.Scheduler(i).GetCurrentThread() == this) {
+ if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@@ -818,7 +842,7 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
return ResultSuccess;
}
-ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
+Result KThread::GetThreadContext3(std::vector<u8>& out) {
// Lock ourselves.
KScopedLightLock lk{activity_pause_lock};
@@ -868,6 +892,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters++) >= 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
// Insert the waiter.
@@ -881,6 +906,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters--) > 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
// Remove the waiter.
@@ -956,6 +982,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
// Keep track of how many kernel waiters we have.
if (IsKernelAddressKey(thread->GetAddressKey())) {
ASSERT((num_kernel_waiters--) > 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
}
it = waiter_list.erase(it);
@@ -983,7 +1010,7 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
return next_lock_owner;
}
-ResultCode KThread::Run() {
+Result KThread::Run() {
while (true) {
KScopedSchedulerLock lk{kernel};
@@ -1011,8 +1038,6 @@ ResultCode KThread::Run() {
// Set our state and finish.
SetState(ThreadState::Runnable);
- DisableDispatch();
-
return ResultSuccess;
}
}
@@ -1044,9 +1069,11 @@ void KThread::Exit() {
// Register the thread as a work task.
KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
}
+
+ UNREACHABLE_MSG("KThread::Exit() would return");
}
-ResultCode KThread::Sleep(s64 timeout) {
+Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel));
ASSERT(timeout > 0);
@@ -1079,17 +1106,12 @@ void KThread::IfDummyThreadTryWait() {
return;
}
- // Block until we can grab the lock.
- KScopedSpinLock lk{dummy_wait_lock};
-}
-
-void KThread::IfDummyThreadBeginWait() {
- if (!IsDummyThread()) {
- return;
- }
+ ASSERT(!kernel.IsPhantomModeForSingleCore());
- // Ensure the thread will block when IfDummyThreadTryWait is called.
- dummy_wait_lock.Lock();
+ // Block until we are no longer waiting.
+ std::unique_lock lk(dummy_wait_lock);
+ dummy_wait_cv.wait(
+ lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
}
void KThread::IfDummyThreadEndWait() {
@@ -1097,8 +1119,8 @@ void KThread::IfDummyThreadEndWait() {
return;
}
- // Ensure the thread will no longer block.
- dummy_wait_lock.Unlock();
+ // Wake up the waiting thread.
+ dummy_wait_cv.notify_one();
}
void KThread::BeginWait(KThreadQueue* queue) {
@@ -1107,12 +1129,9 @@ void KThread::BeginWait(KThreadQueue* queue) {
// Set our wait queue.
wait_queue = queue;
-
- // Special case for dummy threads to ensure they block.
- IfDummyThreadBeginWait();
}
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1122,7 +1141,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCod
}
}
-void KThread::EndWait(ResultCode wait_result_) {
+void KThread::EndWait(Result wait_result_) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1141,7 +1160,7 @@ void KThread::EndWait(ResultCode wait_result_) {
}
}
-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
// Lock the scheduler.
KScopedSchedulerLock sl(kernel);
@@ -1158,10 +1177,11 @@ void KThread::SetState(ThreadState state) {
SetMutexWaitAddressForDebugging({});
SetWaitReasonForDebugging({});
- const ThreadState old_state = thread_state;
- thread_state =
- static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
- if (thread_state != old_state) {
+ const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+ thread_state.store(
+ static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
+ std::memory_order_relaxed);
+ if (thread_state.load(std::memory_order_relaxed) != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state);
}
}
@@ -1170,6 +1190,10 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
return host_context;
}
+void SetCurrentThread(KernelCore& kernel, KThread* thread) {
+ kernel.SetCurrentEmuThread(thread);
+}
+
KThread* GetCurrentThreadPointer(KernelCore& kernel) {
return kernel.GetCurrentEmuThread();
}
@@ -1188,16 +1212,13 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
return;
}
- // Skip the reschedule if single-core, as dispatch tracking is disabled here.
- if (!Settings::values.use_multi_core.GetValue()) {
- return;
- }
-
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
- auto scheduler = kernel.CurrentScheduler();
+ auto* scheduler = kernel.CurrentScheduler();
- if (scheduler) {
+ if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
scheduler->RescheduleCurrentCore();
+ } else {
+ KScheduler::RescheduleCurrentHLEThread(kernel);
}
} else {
GetCurrentThread(kernel).EnableDispatch();
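
The dummy-thread wait rework above replaces a hand-rolled lock handoff with a standard condition-variable pattern. A self-contained sketch of that pattern, with placeholder names standing in for the thread state and shutdown checks:

#include <condition_variable>
#include <mutex>

// Generic sketch of the wait/notify pairing used by the dummy-thread path.
struct DummyWaitExample {
    std::mutex mutex;
    std::condition_variable cv;
    bool waiting = true;

    void Wait() {
        std::unique_lock lk(mutex);
        // wait() releases the lock while blocked and re-checks the
        // predicate on every wakeup, so spurious wakeups are harmless.
        cv.wait(lk, [&] { return !waiting; });
    }

    void EndWait() {
        {
            std::scoped_lock lk(mutex);
            waiting = false;
        }
        cv.notify_one();
    }
};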
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d058db62c..9ee20208e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -1,10 +1,12 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
#include <span>
#include <string>
#include <utility>
@@ -14,6 +16,7 @@
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_light_lock.h"
@@ -97,6 +100,13 @@ enum class ThreadWaitReasonForDebugging : u32 {
Suspended, ///< Thread is waiting due to process suspension
};
+enum class StepState : u32 {
+ NotStepping, ///< Thread is not currently stepping
+ StepPending, ///< Thread will step when next scheduled
+ StepPerformed, ///< Thread has stepped, waiting to be scheduled again
+};
+
+void SetCurrentThread(KernelCore& kernel, KThread* thread);
[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
@@ -166,7 +176,7 @@ public:
void SetBasePriority(s32 value);
- [[nodiscard]] ResultCode Run();
+ [[nodiscard]] Result Run();
void Exit();
@@ -198,6 +208,8 @@ public:
void Continue();
+ void WaitUntilSuspended();
+
constexpr void SetSyncedIndex(s32 index) {
synced_index = index;
}
@@ -206,11 +218,11 @@ public:
return synced_index;
}
- constexpr void SetWaitResult(ResultCode wait_res) {
+ constexpr void SetWaitResult(Result wait_res) {
wait_result = wait_res;
}
- [[nodiscard]] constexpr ResultCode GetWaitResult() const {
+ [[nodiscard]] constexpr Result GetWaitResult() const {
return wait_result;
}
@@ -255,15 +267,23 @@ public:
[[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
[[nodiscard]] ThreadState GetState() const {
- return thread_state & ThreadState::Mask;
+ return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
}
[[nodiscard]] ThreadState GetRawState() const {
- return thread_state;
+ return thread_state.load(std::memory_order_relaxed);
}
void SetState(ThreadState state);
+ [[nodiscard]] StepState GetStepState() const {
+ return step_state;
+ }
+
+ void SetStepState(StepState state) {
+ step_state = state;
+ }
+
[[nodiscard]] s64 GetLastScheduledTick() const {
return last_scheduled_tick;
}
@@ -325,15 +345,15 @@ public:
return physical_affinity_mask;
}
- [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+ [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
- [[nodiscard]] ResultCode SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+ [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
- [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
+ [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);
- [[nodiscard]] ResultCode Sleep(s64 timeout);
+ [[nodiscard]] Result Sleep(s64 timeout);
[[nodiscard]] s64 GetYieldScheduleCount() const {
return schedule_count;
@@ -391,20 +411,22 @@ public:
static void PostDestroy(uintptr_t arg);
- [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
+ [[nodiscard]] static Result InitializeDummyThread(KThread* thread);
+
+ [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread,
- s32 virt_core);
+ [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system,
- KThread* thread,
- KThreadFunction func,
- uintptr_t arg, s32 virt_core);
+ [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ s32 virt_core);
- [[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread,
- KThreadFunction func, uintptr_t arg,
- VAddr user_stack_top, s32 prio,
- s32 virt_core, KProcess* owner);
+ [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
+ KThreadFunction func, uintptr_t arg,
+ VAddr user_stack_top, s32 prio, s32 virt_core,
+ KProcess* owner);
public:
struct StackParameters {
@@ -461,39 +483,16 @@ public:
return per_core_priority_queue_entry[core];
}
- [[nodiscard]] bool IsKernelThread() const {
- return GetActiveCore() == 3;
- }
-
- [[nodiscard]] bool IsDispatchTrackingDisabled() const {
- return is_single_core || IsKernelThread();
- }
-
[[nodiscard]] s32 GetDisableDispatchCount() const {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return 1;
- }
-
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return;
- }
-
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
- if (IsDispatchTrackingDisabled()) {
- // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
- return;
- }
-
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
@@ -590,7 +589,7 @@ public:
void RemoveWaiter(KThread* thread);
- [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
+ [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
[[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
@@ -616,9 +615,9 @@ public:
}
void BeginWait(KThreadQueue* queue);
- void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
- void EndWait(ResultCode wait_result_);
- void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
+ void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
+ void EndWait(Result wait_result_);
+ void CancelWait(Result wait_result_, bool cancel_timer_task);
[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
@@ -641,9 +640,16 @@ public:
// blocking as needed.
void IfDummyThreadTryWait();
- void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
+ [[nodiscard]] uintptr_t GetArgument() const {
+ return argument;
+ }
+
+ [[nodiscard]] VAddr GetUserStackTop() const {
+ return stack_top;
+ }
+
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -656,7 +662,7 @@ private:
static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
struct ConditionVariableComparator {
- struct LightCompareType {
+ struct RedBlackKeyType {
u64 cv_key{};
s32 priority{};
@@ -672,8 +678,8 @@ private:
template <typename T>
requires(
std::same_as<T, KThread> ||
- std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
- const KThread& rhs) {
+ std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
+ const KThread& rhs) {
const u64 l_key = lhs.GetConditionVariableKey();
const u64 r_key = rhs.GetConditionVariableKey();
@@ -697,14 +703,13 @@ private:
void FinishTermination();
- [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
- s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+ [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
+ s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
- [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
- uintptr_t arg, VAddr user_stack_top, s32 prio,
- s32 core, KProcess* owner, ThreadType type,
- std::function<void(void*)>&& init_func,
- void* init_func_parameter);
+ [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
+ uintptr_t arg, VAddr user_stack_top, s32 prio,
+ s32 core, KProcess* owner, ThreadType type,
+ std::function<void()>&& init_func);
static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
@@ -741,7 +746,7 @@ private:
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
s32 synced_index{};
- ResultCode wait_result{ResultSuccess};
+ Result wait_result{ResultSuccess};
s32 base_priority{};
s32 physical_ideal_core_id{};
s32 virtual_ideal_core_id{};
@@ -751,7 +756,7 @@ private:
KAffinityMask original_physical_affinity_mask{};
s32 original_physical_ideal_core_id{};
s32 num_core_migration_disables{};
- ThreadState thread_state{};
+ std::atomic<ThreadState> thread_state{};
std::atomic<bool> termination_requested{};
bool wait_cancelled{};
bool cancellable{};
@@ -761,18 +766,22 @@ private:
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
- KSpinLock context_guard{};
- KSpinLock dummy_wait_lock{};
+ Common::SpinLock context_guard{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
+ StepState step_state{};
+ std::mutex dummy_wait_lock;
+ std::condition_variable dummy_wait_cv;
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+ uintptr_t argument{};
+ VAddr stack_top{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp
new file mode 100644
index 000000000..563560114
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_local_page.cpp
@@ -0,0 +1,67 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/scope_exit.h"
+#include "core/core.h"
+
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_buffer.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_thread_local_page.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
+ // Set that this process owns us.
+ m_owner = process;
+ m_kernel = &kernel;
+
+ // Allocate a new page.
+ KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
+ R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
+ auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+
+ // Map the address in.
+ const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);
+ R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr,
+ KMemoryState::ThreadLocal,
+ KMemoryPermission::UserReadWrite));
+
+ // We succeeded.
+ page_buf_guard.Cancel();
+
+ return ResultSuccess;
+}
+
+Result KThreadLocalPage::Finalize() {
+ // Get the physical address of the page.
+ const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr);
+ ASSERT(phys_addr);
+
+ // Unmap the page.
+ R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal));
+
+ // Free the page.
+ KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr));
+
+ return ResultSuccess;
+}
+
+VAddr KThreadLocalPage::Reserve() {
+ for (size_t i = 0; i < m_is_region_free.size(); i++) {
+ if (m_is_region_free[i]) {
+ m_is_region_free[i] = false;
+ return this->GetRegionAddress(i);
+ }
+ }
+
+ return 0;
+}
+
+void KThreadLocalPage::Release(VAddr addr) {
+ m_is_region_free[this->GetRegionIndex(addr)] = true;
+}
+
+} // namespace Kernel
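
A hypothetical end-to-end use of the new class, following the same allocation pattern this file uses for KPageBuffer (slab Allocate, Initialize, then hand out regions). The helper name is illustrative, and KThreadLocalPage::Allocate is assumed to come from its KSlabAllocated base:

// Sketch: allocate a thread-local page for a process and reserve one
// thread-local region out of it. Cleanup on failure is elided for brevity.
Result AllocateThreadLocalRegion(KernelCore& kernel, KProcess* process, VAddr* out_tlr) {
    KThreadLocalPage* tlp = KThreadLocalPage::Allocate(kernel);
    R_UNLESS(tlp != nullptr, ResultOutOfMemory);

    // Map the backing page into the owner's address space.
    R_TRY(tlp->Initialize(kernel, process));

    // Hand out the first free region; Reserve() returns 0 when the page is full.
    *out_tlr = tlp->Reserve();
    ASSERT(*out_tlr != 0);

    return ResultSuccess;
}
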
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h
new file mode 100644
index 000000000..0a7f22680
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_local_page.h
@@ -0,0 +1,110 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+class KernelCore;
+class KProcess;
+
+class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>,
+ public KSlabAllocated<KThreadLocalPage> {
+public:
+ static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize;
+ static_assert(RegionsPerPage > 0);
+
+public:
+ constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) {
+ m_is_region_free.fill(true);
+ }
+
+ constexpr VAddr GetAddress() const {
+ return m_virt_addr;
+ }
+
+ Result Initialize(KernelCore& kernel, KProcess* process);
+ Result Finalize();
+
+ VAddr Reserve();
+ void Release(VAddr addr);
+
+ bool IsAllUsed() const {
+ return std::ranges::all_of(m_is_region_free, [](bool is_free) { return !is_free; });
+ }
+
+ bool IsAllFree() const {
+ return std::ranges::all_of(m_is_region_free, [](bool is_free) { return is_free; });
+ }
+
+ bool IsAnyUsed() const {
+ return !this->IsAllFree();
+ }
+
+ bool IsAnyFree() const {
+ return !this->IsAllUsed();
+ }
+
+public:
+ using RedBlackKeyType = VAddr;
+
+ static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) {
+ return v;
+ }
+ static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) {
+ return v.GetAddress();
+ }
+
+ template <typename T>
+ requires(std::same_as<T, KThreadLocalPage> ||
+ std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs,
+ const KThreadLocalPage&
+ rhs) {
+ const VAddr lval = GetRedBlackKey(lhs);
+ const VAddr rval = GetRedBlackKey(rhs);
+
+ if (lval < rval) {
+ return -1;
+ } else if (lval == rval) {
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+
+private:
+ constexpr VAddr GetRegionAddress(size_t i) const {
+ return this->GetAddress() + i * Svc::ThreadLocalRegionSize;
+ }
+
+ constexpr bool Contains(VAddr addr) const {
+ return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
+ }
+
+ constexpr size_t GetRegionIndex(VAddr addr) const {
+ ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize));
+ ASSERT(this->Contains(addr));
+ return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize;
+ }
+
+private:
+ VAddr m_virt_addr{};
+ KProcess* m_owner{};
+ KernelCore* m_kernel{};
+ std::array<bool, RegionsPerPage> m_is_region_free{};
+};
+
+} // namespace Kernel
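
The RedBlackKeyType/Compare pair exists so an owner can keep these pages in an intrusive tree keyed by virtual address. A lookup sketch, under the assumption that the intrusive tree exposes a find_key() accepting the declared key type:

// Hypothetical: locate the page backing a given thread-local region.
using TlpTree =
    Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;

KThreadLocalPage* FindOwningPage(TlpTree& tree, VAddr tlr_addr) {
    // Regions live inside a page, so key the search on the page base address.
    auto it = tree.find_key(Common::AlignDown(tlr_addr, PageSize));
    return it != tree.end() ? std::addressof(*it) : nullptr;
}
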
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index d5248b547..9f4e081ba 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
@@ -10,9 +9,9 @@ namespace Kernel {
void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
[[maybe_unused]] KSynchronizationObject* signaled_object,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
-void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
+void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -26,8 +25,7 @@ void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
}
-void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task) {
+void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) {
// Set the thread's wait result.
waiting_thread->SetWaitResult(wait_result);
@@ -44,6 +42,6 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
}
void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
- [[maybe_unused]] ResultCode wait_result) {}
+ [[maybe_unused]] Result wait_result) {}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index ccb718e49..8d76ece81 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -15,10 +14,9 @@ public:
virtual ~KThreadQueue() = default;
virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
- ResultCode wait_result);
- virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
- virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
- bool cancel_timer_task);
+ Result wait_result);
+ virtual void EndWait(KThread* waiting_thread, Result wait_result);
+ virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
private:
KernelCore& kernel;
@@ -29,7 +27,7 @@ class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
- void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
+ void EndWait(KThread* waiting_thread, Result wait_result) override final;
};
} // namespace Kernel
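
Since the ResultCode→Result rename leaves the virtual surface unchanged, derived queues keep overriding the same three hooks. A minimal illustrative subclass in the style of KThreadQueueWithoutEndWait above; the class name and bookkeeping are hypothetical:

// Illustrative only: a queue that performs object-specific bookkeeping
// before deferring to the base implementation.
class KThreadQueueForExample final : public KThreadQueue {
public:
    explicit KThreadQueueForExample(KernelCore& kernel_) : KThreadQueue(kernel_) {}

    void CancelWait(KThread* waiting_thread, Result wait_result,
                    bool cancel_timer_task) override {
        // Remove the thread from the owning object's waiter list here, then
        // let the base class set the wait result and reschedule.
        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
    }
};
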
diff --git a/src/core/hle/kernel/k_trace.h b/src/core/hle/kernel/k_trace.h
index d3fed1888..d61af4830 100644
--- a/src/core/hle/kernel/k_trace.h
+++ b/src/core/hle/kernel/k_trace.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index 1732925c9..b0320eb73 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -14,8 +13,8 @@ KTransferMemory::KTransferMemory(KernelCore& kernel_)
KTransferMemory::~KTransferMemory() = default;
-ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_,
- Svc::MemoryPermission owner_perm_) {
+Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
+ Svc::MemoryPermission owner_perm_) {
// Set members.
owner = kernel.CurrentProcess();
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index cb7521823..85d508ee7 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -8,7 +7,7 @@
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
-union ResultCode;
+union Result;
namespace Core::Memory {
class Memory;
@@ -27,7 +26,7 @@ public:
explicit KTransferMemory(KernelCore& kernel_);
~KTransferMemory() override;
- ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
+ Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
void Finalize() override;
diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h
index b7794c6a8..ef591d831 100644
--- a/src/core/hle/kernel/k_worker_task.h
+++ b/src/core/hle/kernel/k_worker_task.h
@@ -1,6 +1,5 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
index 785e08111..04042bf8f 100644
--- a/src/core/hle/kernel/k_worker_task_manager.cpp
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -1,6 +1,5 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/hle/kernel/k_process.h"
@@ -24,7 +23,7 @@ void KWorkerTask::DoWorkerTask() {
}
}
-KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "yuzu:KWorkerTaskManager") {}
+KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "KWorkerTaskManager") {}
void KWorkerTaskManager::AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task) {
ASSERT(type <= WorkerType::Count);
diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h
index 43d1bfcec..f6618883e 100644
--- a/src/core/hle/kernel/k_worker_task_manager.h
+++ b/src/core/hle/kernel/k_worker_task_manager.h
@@ -1,6 +1,5 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp
index bdb1db6d5..ff88c5acd 100644
--- a/src/core/hle/kernel/k_writable_event.cpp
+++ b/src/core/hle/kernel/k_writable_event.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_readable_event.h"
@@ -19,11 +18,11 @@ void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) {
parent->GetReadableEvent().Open();
}
-ResultCode KWritableEvent::Signal() {
+Result KWritableEvent::Signal() {
return parent->GetReadableEvent().Signal();
}
-ResultCode KWritableEvent::Clear() {
+Result KWritableEvent::Clear() {
return parent->GetReadableEvent().Clear();
}
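
Both entry points now simply forward to the readable half. A signal/clear round trip would look like the sketch below, assuming KEvent also exposes a GetWritableEvent() counterpart to the GetReadableEvent() used above:

// Sketch: Signal() wakes waiters on the readable event; Clear() resets it.
Result SignalAndReset(KEvent* event) {
    R_TRY(event->GetWritableEvent().Signal()); // forwards to the readable side
    // ... waiters observe the signaled state here ...
    return event->GetWritableEvent().Clear();  // forwards to the readable side
}
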
diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h
index 858d982c4..3fd0c7d0a 100644
--- a/src/core/hle/kernel/k_writable_event.h
+++ b/src/core/hle/kernel/k_writable_event.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -26,8 +25,8 @@ public:
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
void Initialize(KEvent* parent_, std::string&& name_);
- ResultCode Signal();
- ResultCode Clear();
+ Result Signal();
+ Result Clear();
KEvent* GetParent() const {
return parent;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 49c0714ed..9251f29ad 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <atomic>
@@ -18,13 +17,10 @@
#include "common/thread.h"
#include "common/thread_worker.h"
#include "core/arm/arm_interface.h"
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/core_timing.h"
-#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
-#include "core/device_memory.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_client_port.h"
@@ -35,7 +31,6 @@
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_shared_memory.h"
-#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
@@ -52,42 +47,41 @@ namespace Kernel {
struct KernelCore::Impl {
explicit Impl(Core::System& system_, KernelCore& kernel_)
- : time_manager{system_}, object_list_container{kernel_},
- service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {}
+ : time_manager{system_},
+ service_threads_manager{1, "ServiceThreadsManager"}, system{system_} {}
void SetMulticore(bool is_multi) {
is_multicore = is_multi;
}
void Initialize(KernelCore& kernel) {
+ global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel);
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
+ default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread");
is_phantom_mode_for_singlecore = false;
- InitializePhysicalCores();
-
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
- KMemoryLayout memory_layout;
- DeriveInitialMemoryLayout(memory_layout);
- Init::InitializeSlabHeaps(system, memory_layout);
+ DeriveInitialMemoryLayout();
+ Init::InitializeSlabHeaps(system, *memory_layout);
// Initialize kernel memory and resources.
- InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
- InitializeMemoryLayout(memory_layout);
- InitializePageSlab();
- InitializeSchedulers();
- InitializeSuspendThreads();
+ InitializeSystemResourceLimit(kernel, system.CoreTiming());
+ InitializeMemoryLayout();
+ Init::InitializeKPageBufferSlabHeap(system);
+ InitializeShutdownThreads();
InitializePreemption(kernel);
+ InitializePhysicalCores();
RegisterHostThread();
}
void InitializeCores() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- cores[core_id].Initialize(current_process->Is64BitProcess());
+ cores[core_id]->Initialize((*current_process).Is64BitProcess());
system.Memory().SetCurrentPageTable(*current_process, core_id);
}
}
@@ -98,39 +92,16 @@ struct KernelCore::Impl {
process_list.clear();
- // Close all open server ports.
- std::unordered_set<KServerPort*> server_ports_;
- {
- std::lock_guard lk(server_ports_lock);
- server_ports_ = server_ports;
- server_ports.clear();
- }
- for (auto* server_port : server_ports_) {
- server_port->Close();
- }
- // Close all open server sessions.
- std::unordered_set<KServerSession*> server_sessions_;
- {
- std::lock_guard lk(server_sessions_lock);
- server_sessions_ = server_sessions;
- server_sessions.clear();
- }
- for (auto* server_session : server_sessions_) {
- server_session->Close();
- }
-
- // Ensure that the object list container is finalized and properly shutdown.
- object_list_container.Finalize();
-
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+ CloseServices();
next_object_id = 0;
next_kernel_process_id = KProcess::InitialKIPIDMin;
next_user_process_id = KProcess::ProcessIDMin;
next_thread_id = 1;
- cores.clear();
+ for (auto& core : cores) {
+ core = nullptr;
+ }
global_handle_table->Finalize();
global_handle_table.reset();
@@ -155,15 +126,15 @@ struct KernelCore::Impl {
CleanupObject(font_shared_mem);
CleanupObject(irs_shared_mem);
CleanupObject(time_shared_mem);
+ CleanupObject(hidbus_shared_mem);
CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- if (suspend_threads[core_id]) {
- suspend_threads[core_id]->Close();
- suspend_threads[core_id] = nullptr;
+ if (shutdown_threads[core_id]) {
+ shutdown_threads[core_id]->Close();
+ shutdown_threads[core_id] = nullptr;
}
- schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
@@ -172,7 +143,7 @@ struct KernelCore::Impl {
// Close kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_in_use_objects_lock);
+ std::scoped_lock lk{registered_in_use_objects_lock};
if (registered_in_use_objects.size()) {
for (auto& object : registered_in_use_objects) {
object->Close();
@@ -183,48 +154,76 @@ struct KernelCore::Impl {
// Shutdown all processes.
if (current_process) {
- current_process->Finalize();
+ (*current_process).Finalize();
// current_process->Close();
// TODO: The current process should be destroyed based on accurate ref counting after
// calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
- current_process->Destroy();
+ (*current_process).Destroy();
current_process = nullptr;
}
// Track kernel objects that were not freed on shutdown
{
- std::lock_guard lk(registered_objects_lock);
+ std::scoped_lock lk{registered_objects_lock};
if (registered_objects.size()) {
- LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
- registered_objects.size());
+ LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!",
+ registered_objects.size());
registered_objects.clear();
}
}
+
+ // Ensure that the object list container is finalized and properly shutdown.
+ global_object_list_container->Finalize();
+ global_object_list_container.reset();
+ }
+
+ void CloseServices() {
+ // Close all open server sessions and ports.
+ std::unordered_set<KAutoObject*> server_objects_;
+ {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects_ = server_objects;
+ server_objects.clear();
+ }
+ for (auto* server_object : server_objects_) {
+ server_object->Close();
+ }
+
+ // Ensures all service threads gracefully shutdown.
+ ClearServiceThreads();
}
void InitializePhysicalCores() {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
- cores.emplace_back(i, system, *schedulers[i], interrupts);
- }
- }
+ const s32 core{static_cast<s32>(i)};
- void InitializeSchedulers() {
- for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
- cores[i].Scheduler().Initialize();
+ schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
+ cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
+
+ auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+ main_thread->SetName(fmt::format("MainThread:{}", core));
+ main_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+ auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+ idle_thread->SetCurrentCore(core);
+ ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+ schedulers[i]->Initialize(main_thread, idle_thread, core);
}
}
// Creates the default system resource limit
void InitializeSystemResourceLimit(KernelCore& kernel,
- const Core::Timing::CoreTiming& core_timing,
- const KMemoryLayout& memory_layout) {
+ const Core::Timing::CoreTiming& core_timing) {
system_resource_limit = KResourceLimit::Create(system.Kernel());
system_resource_limit->Initialize(&core_timing);
- const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
+ const auto sizes{memory_layout->GetTotalAndKernelMemorySizes()};
+ const auto total_size{sizes.first};
+ const auto kernel_size{sizes.second};
// If setting the default system values fails, then something seriously wrong has occurred.
ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -240,37 +239,31 @@ struct KernelCore::Impl {
constexpr u64 secure_applet_memory_size{4_MiB};
ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
secure_applet_memory_size));
-
- // This memory seems to be reserved on hardware, but is not reserved/used by yuzu.
- // Likely Horizon OS reserved memory
- // TODO(ameerj): Derive the memory rather than hardcode it.
- constexpr u64 unknown_reserved_memory{0x2f896000};
- ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory,
- unknown_reserved_memory));
}
void InitializePreemption(KernelCore& kernel) {
preemption_event = Core::Timing::CreateEvent(
- "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
+ "PreemptionCallback",
+ [this, &kernel](std::uintptr_t, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
{
KScopedSchedulerLock lock(kernel);
global_scheduler_context->PreemptThreads();
}
- const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ return std::nullopt;
});
const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
- system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+ system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
}
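
The preemption callback no longer reschedules itself: ScheduleLoopingEvent installs the recurrence up front, and the callback's new optional return value lets it override the next delay. A condensed sketch of the same pattern, with the return-value semantics assumed from this diff:

auto tick_event = Core::Timing::CreateEvent(
    "ExampleTick",
    [](std::uintptr_t, s64 /*time*/,
       std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
        // Periodic work goes here.
        return std::nullopt; // keep the interval given to ScheduleLoopingEvent
    });

const auto interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
system.CoreTiming().ScheduleLoopingEvent(interval, interval, tick_event);
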
- void InitializeSuspendThreads() {
+ void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- suspend_threads[core_id] = KThread::Create(system.Kernel());
- ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
+ shutdown_threads[core_id] = KThread::Create(system.Kernel());
+ ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
core_id)
.IsSuccess());
- suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
+ shutdown_threads[core_id]->SetName(fmt::format("ShutdownThread:{}", core_id));
}
}
@@ -300,15 +293,16 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() {
- auto make_thread = [this]() {
- KThread* thread = KThread::Create(system.Kernel());
+ auto initialize = [this](KThread* thread) {
ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread;
};
- thread_local KThread* saved_thread = make_thread();
- return saved_thread;
+ thread_local auto raw_thread = KThread(system.Kernel());
+ thread_local auto thread = initialize(&raw_thread);
+
+ return thread;
}
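
The replacement relies on function-local thread_local storage: the dummy KThread is constructed in place, once per host thread, on first call, so no registry or lock is needed. The same idiom in isolation, with an illustrative type:

#include <string>

// Each host thread gets its own lazily initialized instance; initialization
// runs exactly once per thread on first use.
std::string& PerThreadScratchBuffer() {
    thread_local std::string buffer(256, '\0'); // constructed per thread
    return buffer;
}
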
/// Registers a CPU core thread by allocating a host thread ID for it
@@ -347,6 +341,8 @@ struct KernelCore::Impl {
return is_shutting_down.load(std::memory_order_relaxed);
}
+ static inline thread_local KThread* current_thread{nullptr};
+
KThread* GetCurrentEmuThread() {
// If we are shutting down the kernel, none of this is relevant anymore.
if (IsShuttingDown()) {
@@ -357,19 +353,26 @@ struct KernelCore::Impl {
if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
return GetHostDummyThread();
}
- return schedulers[thread_id]->GetCurrentThread();
+
+ return current_thread;
+ }
+
+ void SetCurrentEmuThread(KThread* thread) {
+ current_thread = thread;
}
- void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
+ void DeriveInitialMemoryLayout() {
+ memory_layout = std::make_unique<KMemoryLayout>();
+
// Insert the root region for the virtual memory tree, from which all other regions will
// derive.
- memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+ memory_layout->GetVirtualMemoryRegionTree().InsertDirectly(
KernelVirtualAddressSpaceBase,
KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
// Insert the root region for the physical memory tree, from which all other regions will
// derive.
- memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+ memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly(
KernelPhysicalAddressSpaceBase,
KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
@@ -386,7 +389,7 @@ struct KernelCore::Impl {
if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
}
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
// Setup the code region.
@@ -395,11 +398,11 @@ struct KernelCore::Impl {
Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
constexpr size_t code_region_size = code_region_end - code_region_start;
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
code_region_start, code_region_size, KMemoryRegionType_KernelCode));
// Setup board-specific device physical regions.
- Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+ Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
// Determine the amount of space needed for the misc region.
size_t misc_region_needed_size;
@@ -408,7 +411,7 @@ struct KernelCore::Impl {
misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
// Account for each auto-map device.
- for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
// Check that the region is valid.
ASSERT(region.GetEndAddress() != 0);
@@ -433,22 +436,22 @@ struct KernelCore::Impl {
// Setup the misc region.
const VAddr misc_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
// Setup the stack region.
constexpr size_t StackRegionSize = 14_MiB;
constexpr size_t StackRegionAlign = KernelAslrAlignment;
const VAddr stack_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
// Determine the size of the resource region.
- const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+ const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
// Determine the size of the slab region.
const size_t slab_region_size =
@@ -465,23 +468,23 @@ struct KernelCore::Impl {
Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
const VAddr slab_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
(code_end_phys_addr % SlabRegionAlign);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
// Setup the temp region.
constexpr size_t TempRegionSize = 128_MiB;
constexpr size_t TempRegionAlign = KernelAslrAlignment;
const VAddr temp_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
- KMemoryRegionType_KernelTemp));
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+ KMemoryRegionType_KernelTemp));
// Automatically map in devices that have auto-map attributes.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
// We only care about kernel regions.
if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
continue;
@@ -508,21 +511,21 @@ struct KernelCore::Impl {
const size_t map_size =
Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
const VAddr map_virt_addr =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
}
- Init::SetupDramPhysicalMemoryRegions(memory_layout);
+ Init::SetupDramPhysicalMemoryRegions(*memory_layout);
// Insert a physical region for the kernel code region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
// Insert a physical region for the kernel slab region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
// Determine size available for kernel page table heaps, requiring > 8 MB.
@@ -531,12 +534,12 @@ struct KernelCore::Impl {
ASSERT(page_table_heap_size / 4_MiB > 2);
// Insert a physical region for the kernel page table heap region
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
// All DRAM regions that we haven't tagged by this point will be mapped under the linear
// mapping. Tag them.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.GetType() == KMemoryRegionType_Dram) {
// Check that the region is valid.
ASSERT(region.GetEndAddress() != 0);
@@ -548,7 +551,7 @@ struct KernelCore::Impl {
// Get the linear region extents.
const auto linear_extents =
- memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+ memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
KMemoryRegionAttr_LinearMapped);
ASSERT(linear_extents.GetEndAddress() != 0);
@@ -560,7 +563,7 @@ struct KernelCore::Impl {
Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
aligned_linear_phys_start;
const VAddr linear_region_start =
- memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+ memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
@@ -569,7 +572,7 @@ struct KernelCore::Impl {
{
PAddr cur_phys_addr = 0;
u64 cur_size = 0;
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
continue;
}
@@ -588,55 +591,49 @@ struct KernelCore::Impl {
const VAddr region_virt_addr =
region.GetAddress() + linear_region_phys_to_virt_diff;
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
region_virt_addr, region.GetSize(),
GetTypeForVirtualLinearMapping(region.GetType())));
region.SetPairAddress(region_virt_addr);
KMemoryRegion* virt_region =
- memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+ memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
ASSERT(virt_region != nullptr);
virt_region->SetPairAddress(region.GetAddress());
}
}
// Insert regions for the initial page table region.
- ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
- ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+ ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
KMemoryRegionType_VirtualDramKernelInitPt));
// All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
// some pool partition. Tag them.
- for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+ for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
region.SetType(KMemoryRegionType_DramPoolPartition);
}
}
// Setup all other memory regions needed to arrange the pool partitions.
- Init::SetupPoolPartitionMemoryRegions(memory_layout);
+ Init::SetupPoolPartitionMemoryRegions(*memory_layout);
// Cache all linear regions in their own trees for faster access, later.
- memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
- linear_region_start);
+ memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+ linear_region_start);
}
- void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
- const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
- const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
- const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
+ void InitializeMemoryLayout() {
+ const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();
- // Initialize memory managers
+ // Initialize the memory manager.
memory_manager = std::make_unique<KMemoryManager>(system);
- memory_manager->InitializeManager(KMemoryManager::Pool::Application,
- application_pool.GetAddress(),
- application_pool.GetEndAddress());
- memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
- applet_pool.GetEndAddress());
- memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
- system_pool.GetEndAddress());
+ const auto& management_region = memory_layout->GetPoolManagementRegion();
+ ASSERT(management_region.GetEndAddress() != 0);
+ memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
// Setup memory regions for emulated processes
// TODO(bunnei): These should not be hardcoded regions initialized within the kernel
@@ -644,16 +641,20 @@ struct KernelCore::Impl {
constexpr std::size_t font_size{0x1100000};
constexpr std::size_t irs_size{0x8000};
constexpr std::size_t time_size{0x1000};
+ constexpr std::size_t hidbus_size{0x1000};
const PAddr hid_phys_addr{system_pool.GetAddress()};
const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
+ const PAddr hidbus_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size +
+ time_size};
hid_shared_mem = KSharedMemory::Create(system.Kernel());
font_shared_mem = KSharedMemory::Create(system.Kernel());
irs_shared_mem = KSharedMemory::Create(system.Kernel());
time_shared_mem = KSharedMemory::Create(system.Kernel());
+ hidbus_shared_mem = KSharedMemory::Create(system.Kernel());
hid_shared_mem->Initialize(system.DeviceMemory(), nullptr,
{hid_phys_addr, hid_size / PageSize},
@@ -671,22 +672,10 @@ struct KernelCore::Impl {
{time_phys_addr, time_size / PageSize},
Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
time_phys_addr, time_size, "Time:SharedMemory");
- }
-
- void InitializePageSlab() {
- // Allocate slab heaps
- user_slab_heap_pages =
- std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest);
-
- // TODO(ameerj): This should be derived, not hardcoded within the kernel
- constexpr u64 user_slab_heap_size{0x3de000};
- // Reserve slab heaps
- ASSERT(
- system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
- // Initialize slab heap
- user_slab_heap_pages->Initialize(
- system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
- user_slab_heap_size);
+ hidbus_shared_mem->Initialize(system.DeviceMemory(), nullptr,
+ {hidbus_phys_addr, hidbus_size / PageSize},
+ Svc::MemoryPermission::None, Svc::MemoryPermission::Read,
+ hidbus_phys_addr, hidbus_size, "HidBus:SharedMemory");
}
KClientPort* CreateNamedServicePort(std::string name) {
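
The new hidbus block simply extends the existing linear carve-out at the start of the system pool; each region begins where the previous one ends, using the size constants from this function:

// Layout of the service shared-memory regions inside the system pool:
//   hid    : base
//   font   : base + hid_size
//   irs    : base + hid_size + font_size
//   time   : base + hid_size + font_size + irs_size
//   hidbus : base + hid_size + font_size + irs_size + time_size
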
@@ -697,13 +686,20 @@ struct KernelCore::Impl {
}
KClientPort* port = &search->second(system.ServiceManager(), system);
- {
- std::lock_guard lk(server_ports_lock);
- server_ports.insert(&port->GetParent()->GetServerPort());
- }
+ RegisterServerObject(&port->GetParent()->GetServerPort());
return port;
}
+ void RegisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.insert(server_object);
+ }
+
+ void UnregisterServerObject(KAutoObject* server_object) {
+ std::scoped_lock lk(server_objects_lock);
+ server_objects.erase(server_object);
+ }
+
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel,
const std::string& name) {
auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, 1, name);
@@ -716,6 +712,12 @@ struct KernelCore::Impl {
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
if (auto strong_ptr = service_thread.lock()) {
+ if (strong_ptr == default_service_thread.lock()) {
+ // Nothing to do here; the service is using default_service_thread, which
+ // will be released on shutdown.
+ return;
+ }
+
service_threads_manager.QueueWork(
[this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); });
}
@@ -725,8 +727,7 @@ struct KernelCore::Impl {
service_threads_manager.QueueWork([this]() { service_threads.clear(); });
}
- std::mutex server_ports_lock;
- std::mutex server_sessions_lock;
+ std::mutex server_objects_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -737,7 +738,7 @@ struct KernelCore::Impl {
// Lists all processes that exist in the current session.
std::vector<KProcess*> process_list;
- KProcess* current_process{};
+ std::atomic<KProcess*> current_process{};
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
Kernel::TimeManager time_manager;
@@ -750,39 +751,41 @@ struct KernelCore::Impl {
// stores all the objects in place.
std::unique_ptr<KHandleTable> global_handle_table;
- KAutoObjectWithListContainer object_list_container;
+ std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container;
/// Map of named ports managed by the kernel, which can be retrieved using
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
NamedPortTable named_ports;
- std::unordered_set<KServerPort*> server_ports;
- std::unordered_set<KServerSession*> server_sessions;
+ std::unordered_set<KAutoObject*> server_objects;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
- std::vector<Kernel::PhysicalCore> cores;
+ std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
// Next host thread ID to use; IDs 0-3 represent core threads, >3 represent others
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
// Kernel memory management
std::unique_ptr<KMemoryManager> memory_manager;
- std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages;
// Shared memory for services
Kernel::KSharedMemory* hid_shared_mem{};
Kernel::KSharedMemory* font_shared_mem{};
Kernel::KSharedMemory* irs_shared_mem{};
Kernel::KSharedMemory* time_shared_mem{};
+ Kernel::KSharedMemory* hidbus_shared_mem{};
+
+ // Memory layout
+ std::unique_ptr<KMemoryLayout> memory_layout;
// Threads used for services
- std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
+ std::unordered_set<std::shared_ptr<ServiceThread>> service_threads;
+ std::weak_ptr<ServiceThread> default_service_thread;
Common::ThreadWorker service_threads_manager;
- std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
- std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
+ std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
bool is_multicore{};
@@ -818,6 +821,10 @@ void KernelCore::Shutdown() {
impl->Shutdown();
}
+void KernelCore::CloseServices() {
+ impl->CloseServices();
+}
+
const KResourceLimit* KernelCore::GetSystemResourceLimit() const {
return impl->system_resource_limit;
}
@@ -867,11 +874,11 @@ const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
}
Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
- return impl->cores[id];
+ return *impl->cores[id];
}
const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
- return impl->cores[id];
+ return *impl->cores[id];
}
size_t KernelCore::CurrentPhysicalCoreIndex() const {
@@ -883,11 +890,11 @@ size_t KernelCore::CurrentPhysicalCoreIndex() const {
}
Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
- return impl->cores[CurrentPhysicalCoreIndex()];
+ return *impl->cores[CurrentPhysicalCoreIndex()];
}
Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -899,15 +906,6 @@ Kernel::KScheduler* KernelCore::CurrentScheduler() {
return impl->schedulers[core_id].get();
}
-std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
- return impl->interrupts;
-}
-
-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
- const {
- return impl->interrupts;
-}
-
Kernel::TimeManager& KernelCore::TimeManager() {
return impl->time_manager;
}
@@ -925,25 +923,25 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
}
KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
- return impl->object_list_container;
+ return *impl->global_object_list_container;
}
const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
- return impl->object_list_container;
+ return *impl->global_object_list_container;
}
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
- physical_core.ArmInterface().ClearInstructionCache();
+ physical_core->ArmInterface().ClearInstructionCache();
}
}
void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
for (auto& physical_core : impl->cores) {
- if (!physical_core.IsInitialized()) {
+ if (!physical_core->IsInitialized()) {
continue;
}
- physical_core.ArmInterface().InvalidateCacheRange(addr, size);
+ physical_core->ArmInterface().InvalidateCacheRange(addr, size);
}
}
@@ -959,33 +957,31 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
return impl->CreateNamedServicePort(std::move(name));
}
-void KernelCore::RegisterServerSession(KServerSession* server_session) {
- std::lock_guard lk(impl->server_sessions_lock);
- impl->server_sessions.insert(server_session);
+void KernelCore::RegisterServerObject(KAutoObject* server_object) {
+ impl->RegisterServerObject(server_object);
}
-void KernelCore::UnregisterServerSession(KServerSession* server_session) {
- std::lock_guard lk(impl->server_sessions_lock);
- impl->server_sessions.erase(server_session);
+void KernelCore::UnregisterServerObject(KAutoObject* server_object) {
+ impl->UnregisterServerObject(server_object);
}
void KernelCore::RegisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
}
void KernelCore::UnregisterKernelObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_objects_lock);
+ std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.erase(object);
}
void KernelCore::RegisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.insert(object);
}
void KernelCore::UnregisterInUseObject(KAutoObject* object) {
- std::lock_guard lk(impl->registered_in_use_objects_lock);
+ std::scoped_lock lk{impl->registered_in_use_objects_lock};
impl->registered_in_use_objects.erase(object);
}
@@ -1033,6 +1029,10 @@ KThread* KernelCore::GetCurrentEmuThread() const {
return impl->GetCurrentEmuThread();
}
+void KernelCore::SetCurrentEmuThread(KThread* thread) {
+ impl->SetCurrentEmuThread(thread);
+}
+
KMemoryManager& KernelCore::MemoryManager() {
return *impl->memory_manager;
}
@@ -1041,14 +1041,6 @@ const KMemoryManager& KernelCore::MemoryManager() const {
return *impl->memory_manager;
}
-KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() {
- return *impl->user_slab_heap_pages;
-}
-
-const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const {
- return *impl->user_slab_heap_pages;
-}
-
Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
return *impl->hid_shared_mem;
}
@@ -1081,22 +1073,38 @@ const Kernel::KSharedMemory& KernelCore::GetTimeSharedMem() const {
return *impl->time_shared_mem;
}
-void KernelCore::Suspend(bool in_suspention) {
- const bool should_suspend = exception_exited || in_suspention;
- {
- KScopedSchedulerLock lock(*this);
- const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
- for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
- impl->suspend_threads[core_id]->SetState(state);
- impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
- ThreadWaitReasonForDebugging::Suspended);
- if (!should_suspend) {
- impl->suspend_threads[core_id]->DisableDispatch();
+Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() {
+ return *impl->hidbus_shared_mem;
+}
+
+const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
+ return *impl->hidbus_shared_mem;
+}
+
+void KernelCore::Suspend(bool suspended) {
+ const bool should_suspend{exception_exited || suspended};
+ const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
+
+ for (auto* process : GetProcessList()) {
+ process->SetActivity(activity);
+
+ if (should_suspend) {
+ // Wait for execution to stop
+ for (auto* thread : process->GetThreadList()) {
+ thread->WaitUntilSuspended();
}
}
}
}
+void KernelCore::ShutdownCores() {
+ KScopedSchedulerLock lk{*this};
+
+ for (auto* thread : impl->shutdown_threads) {
+ void(thread->Run());
+ }
+}
+
bool KernelCore::IsMulticore() const {
return impl->is_multicore;
}
@@ -1122,6 +1130,10 @@ std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::
return impl->CreateServiceThread(*this, name);
}
+std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const {
+ return impl->default_service_thread;
+}
+
void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
impl->ReleaseServiceThread(service_thread);
}
@@ -1142,6 +1154,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
return impl->worker_task_manager;
}
+const KMemoryLayout& KernelCore::MemoryLayout() const {
+ return *impl->memory_layout;
+}
+
bool KernelCore::IsPhantomModeForSingleCore() const {
return impl->IsPhantomModeForSingleCore();
}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0e04fc3bb..bcf016a97 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -10,15 +9,12 @@
#include <string>
#include <unordered_map>
#include <vector>
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
-#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_common.h"
namespace Core {
-class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -41,7 +37,9 @@ class KClientSession;
class KEvent;
class KHandleTable;
class KLinkedListNode;
+class KMemoryLayout;
class KMemoryManager;
+class KPageBuffer;
class KPort;
class KProcess;
class KResourceLimit;
@@ -51,6 +49,7 @@ class KSession;
class KSharedMemory;
class KSharedMemoryInfo;
class KThread;
+class KThreadLocalPage;
class KTransferMemory;
class KWorkerTaskManager;
class KWritableEvent;
@@ -108,6 +107,9 @@ public:
/// Clears all resources in use by the kernel instance.
void Shutdown();
+ /// Closes all active services in use by the kernel instance.
+ void CloseServices();
+
/// Retrieves a shared pointer to the system resource limit instance.
const KResourceLimit* GetSystemResourceLimit() const;
@@ -179,10 +181,6 @@ public:
const KAutoObjectWithListContainer& ObjectListContainer() const;
- std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
-
- const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
-
void InvalidateAllInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
@@ -193,13 +191,13 @@ public:
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
- /// Registers a server session with the gobal emulation state, to be freed on shutdown. This is
- /// necessary because we do not emulate processes for HLE sessions.
- void RegisterServerSession(KServerSession* server_session);
+ /// Registers a server session or port with the global emulation state, to be freed on shutdown.
+ /// This is necessary because we do not emulate processes for HLE sessions and ports.
+ void RegisterServerObject(KAutoObject* server_object);
- /// Unregisters a server session previously registered with RegisterServerSession when it was
- /// destroyed during the current emulation session.
- void UnregisterServerSession(KServerSession* server_session);
+ /// Unregisters a server session or port previously registered with RegisterServerObject when
+ /// it was destroyed during the current emulation session.
+ void UnregisterServerObject(KAutoObject* server_object);
/// Registers all kernel objects with the global emulation state; this is purely for tracking
/// leaks after emulation has been shut down.
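The RegisterServerObject/UnregisterServerObject rename above works because sessions and ports share the KAutoObject base, so a single container can track either kind for shutdown cleanup. A conceptual sketch of that generalization, with illustrative names only:

```cpp
#include <mutex>
#include <unordered_set>

// Stand-in for KAutoObject: the only operations the registry needs are
// identity and a Close() that drops a reference (assumed, as in yuzu).
struct KAutoObjectStub {
    virtual ~KAutoObjectStub() = default;
    virtual void Close() = 0;
};

class ServerObjectRegistry {
public:
    void Register(KAutoObjectStub* obj) {
        std::lock_guard lk{mutex};
        objects.insert(obj);
    }
    void Unregister(KAutoObjectStub* obj) {
        std::lock_guard lk{mutex};
        objects.erase(obj);
    }
    // e.g. invoked from CloseServices() at shutdown.
    void CloseAll() {
        std::lock_guard lk{mutex};
        for (auto* obj : objects) {
            obj->Close();
        }
        objects.clear();
    }

private:
    std::mutex mutex;
    std::unordered_set<KAutoObjectStub*> objects;
};
```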
@@ -223,6 +221,9 @@ public:
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
+ /// Sets the current guest_thread pointer.
+ void SetCurrentEmuThread(KThread* thread);
+
/// Gets the current host_thread handle.
u32 GetCurrentHostThreadID() const;
@@ -238,12 +239,6 @@ public:
/// Gets the virtual memory manager for the kernel.
const KMemoryManager& MemoryManager() const;
- /// Gets the slab heap allocated for user space pages.
- KSlabHeap<Page>& GetUserSlabHeapPages();
-
- /// Gets the slab heap allocated for user space pages.
- const KSlabHeap<Page>& GetUserSlabHeapPages() const;
-
/// Gets the shared memory object for HID services.
Kernel::KSharedMemory& GetHidSharedMem();
@@ -268,12 +263,21 @@ public:
/// Gets the shared memory object for Time services.
const Kernel::KSharedMemory& GetTimeSharedMem() const;
- /// Suspend/unsuspend the OS.
- void Suspend(bool in_suspention);
+ /// Gets the shared memory object for HIDBus services.
+ Kernel::KSharedMemory& GetHidBusSharedMem();
- /// Exceptional exit the OS.
+ /// Gets the shared memory object for HIDBus services.
+ const Kernel::KSharedMemory& GetHidBusSharedMem() const;
+
+ /// Suspends/unsuspends all processes.
+ void Suspend(bool suspend);
+
+ /// Performs an exceptional exit on all processes.
 void ExceptionalExit();
+ /// Notifies emulated CPU cores to shut down.
+ void ShutdownCores();
+
bool IsMulticore() const;
bool IsShuttingDown() const;
@@ -283,9 +287,11 @@ public:
void ExitSVCProfile();
/**
- * Creates an HLE service thread, which are used to execute service routines asynchronously.
- * While these are allocated per ServerSession, these need to be owned and managed outside
- * of ServerSession to avoid a circular dependency.
+ * Creates a host thread to execute HLE service requests, allowing service routines to run
+ * asynchronously. While these are allocated per ServerSession, they need to be owned and
+ * managed outside of ServerSession to avoid a circular dependency. In general, most services
+ * can just use the default service thread and do not need their own host service thread.
+ * See GetDefaultServiceThread.
* @param name String name for the ServerSession creating this thread, used for debug
* purposes.
* @returns A weak pointer to the newly created service thread.
@@ -293,6 +299,14 @@ public:
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
/**
+ * Gets the default host service thread, which executes HLE service requests. Unless service
+ * requests need to block on the host, the default service thread should be used in favor of
+ * creating a new service thread.
+ * @returns A weak pointer to the default service thread.
+ */
+ std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const;
+
+ /**
* Releases an HLE service thread, instructing KernelCore to free it. This should be called when
* the ServerSession associated with the thread is destroyed.
* @param service_thread Service thread to release.
@@ -335,6 +349,10 @@ public:
return slab_heap_container->writeable_event;
} else if constexpr (std::is_same_v<T, KCodeMemory>) {
return slab_heap_container->code_memory;
+ } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+ return slab_heap_container->page_buffer;
+ } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+ return slab_heap_container->thread_local_page;
}
}
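The two new branches extend the existing `if constexpr` dispatch: SlabHeap<T>() selects the matching container member at compile time, so adding KPageBuffer and KThreadLocalPage is purely additive. A minimal sketch of the same pattern with stand-in types:

```cpp
#include <type_traits>

template <typename T>
struct SlabHeap { /* fixed-size allocator for objects of type T */ };

struct KPageBuffer {};
struct KThreadLocalPage {};

struct SlabHeapContainer {
    SlabHeap<KPageBuffer> page_buffer;
    SlabHeap<KThreadLocalPage> thread_local_page;
};

template <typename T>
auto& GetSlabHeap(SlabHeapContainer& c) {
    if constexpr (std::is_same_v<T, KPageBuffer>) {
        return c.page_buffer;
    } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
        return c.thread_local_page;
    }
    // For any other T, no branch returns a value, so deduction of auto& fails
    // and the call simply does not compile.
}
```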
@@ -350,6 +368,9 @@ public:
/// Gets the current worker task manager, used for dispatching KThread/KProcess tasks.
const KWorkerTaskManager& WorkerTaskManager() const;
+ /// Gets the memory layout.
+ const KMemoryLayout& MemoryLayout() const;
+
private:
friend class KProcess;
friend class KThread;
@@ -393,6 +414,8 @@ private:
KSlabHeap<KTransferMemory> transfer_memory;
KSlabHeap<KWritableEvent> writeable_event;
KSlabHeap<KCodeMemory> code_memory;
+ KSlabHeap<KPageBuffer> page_buffer;
+ KSlabHeap<KThreadLocalPage> thread_local_page;
};
std::unique_ptr<SlabHeapContainer> slab_heap_container;
diff --git a/src/core/hle/kernel/memory_types.h b/src/core/hle/kernel/memory_types.h
index d458f0eca..3975507bd 100644
--- a/src/core/hle/kernel/memory_types.h
+++ b/src/core/hle/kernel/memory_types.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 7477668e4..d4375962f 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -1,9 +1,6 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
-#include "common/spin_lock.h"
-#include "core/arm/cpu_interrupt_handler.h"
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#include "core/core.h"
@@ -13,16 +10,14 @@
namespace Kernel {
-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
- Core::CPUInterrupts& interrupts_)
- : core_index{core_index_}, system{system_}, scheduler{scheduler_},
- interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
+ : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
#ifdef ARCHITECTURE_x86_64
// TODO(bunnei): Initialization relies on a core being available. We may later replace this with
// a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
auto& kernel = system.Kernel();
arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
- system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
#else
#error Platform not supported yet.
#endif
@@ -36,7 +31,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
if (!is_64_bit) {
// We already initialized a 64-bit core, replace with a 32-bit one.
arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
- system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+ system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
}
#else
#error Platform not supported yet.
@@ -45,26 +40,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
void PhysicalCore::Run() {
arm_interface->Run();
+ arm_interface->ClearExclusiveState();
}
void PhysicalCore::Idle() {
- interrupts[core_index].AwaitInterrupt();
+ std::unique_lock lk{guard};
+ on_interrupt.wait(lk, [this] { return is_interrupted; });
}
bool PhysicalCore::IsInterrupted() const {
- return interrupts[core_index].IsInterrupted();
+ return is_interrupted;
}
void PhysicalCore::Interrupt() {
- guard->lock();
- interrupts[core_index].SetInterrupt(true);
- guard->unlock();
+ std::unique_lock lk{guard};
+ is_interrupted = true;
+ arm_interface->SignalInterrupt();
+ on_interrupt.notify_all();
}
void PhysicalCore::ClearInterrupt() {
- guard->lock();
- interrupts[core_index].SetInterrupt(false);
- guard->unlock();
+ std::unique_lock lk{guard};
+ is_interrupted = false;
+ arm_interface->ClearInterrupt();
+ on_interrupt.notify_all();
}
} // namespace Kernel
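The net effect of this file's changes: the per-core CPUInterruptHandler and spin lock are replaced by a mutex, a condition variable, and a plain flag, and the ARM interface is signaled directly. Pulled out of the class, the mechanism is small enough to show whole; this is a self-contained restatement of the diff above, not additional yuzu code:

```cpp
#include <condition_variable>
#include <mutex>

class InterruptLine {
public:
    // PhysicalCore::Idle: block until an interrupt is pending.
    void AwaitInterrupt() {
        std::unique_lock lk{guard};
        on_interrupt.wait(lk, [this] { return is_interrupted; });
    }
    void Interrupt() {
        std::unique_lock lk{guard};
        is_interrupted = true;
        on_interrupt.notify_all();
    }
    void ClearInterrupt() {
        std::unique_lock lk{guard};
        is_interrupted = false;
        // Waiters re-check the predicate and go back to sleep; the notify
        // here is harmless, mirroring the diff.
        on_interrupt.notify_all();
    }
    bool IsInterrupted() const {
        return is_interrupted; // unguarded read, matching the diff's lock-free check
    }

private:
    std::mutex guard;
    std::condition_variable on_interrupt;
    bool is_interrupted{false};
};
```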
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 16a032e89..2fc8d4be2 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -1,24 +1,19 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <cstddef>
#include <memory>
+#include <condition_variable>
+#include <mutex>
#include "core/arm/arm_interface.h"
-namespace Common {
-class SpinLock;
-}
-
namespace Kernel {
class KScheduler;
} // namespace Kernel
namespace Core {
-class CPUInterruptHandler;
class ExclusiveMonitor;
class System;
} // namespace Core
@@ -27,15 +22,11 @@ namespace Kernel {
class PhysicalCore {
public:
- PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
- Core::CPUInterrupts& interrupts_);
+ PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_);
~PhysicalCore();
- PhysicalCore(const PhysicalCore&) = delete;
- PhysicalCore& operator=(const PhysicalCore&) = delete;
-
- PhysicalCore(PhysicalCore&&) = default;
- PhysicalCore& operator=(PhysicalCore&&) = delete;
+ YUZU_NON_COPYABLE(PhysicalCore);
+ YUZU_NON_MOVEABLE(PhysicalCore);
/// Initialize the core for the specified parameters.
void Initialize(bool is_64_bit);
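YUZU_NON_COPYABLE/YUZU_NON_MOVEABLE replace the hand-written deleted special members. They come from yuzu's common headers and expand to roughly the following:

```cpp
// Approximate expansion (see common/common_funcs.h for the real definitions):
#define YUZU_NON_COPYABLE(cls)                                                                     \
    cls(const cls&) = delete;                                                                      \
    cls& operator=(const cls&) = delete

#define YUZU_NON_MOVEABLE(cls)                                                                     \
    cls(cls&&) = delete;                                                                           \
    cls& operator=(cls&&) = delete
```

Note that PhysicalCore was previously move-constructible; after this change it is neither copyable nor moveable, which is required once it directly owns a std::mutex.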
@@ -90,9 +81,11 @@ private:
const std::size_t core_index;
Core::System& system;
Kernel::KScheduler& scheduler;
- Core::CPUInterrupts& interrupts;
- std::unique_ptr<Common::SpinLock> guard;
+
+ std::mutex guard;
+ std::condition_variable on_interrupt;
std::unique_ptr<Core::ARM_Interface> arm_interface;
+ bool is_interrupted{};
};
} // namespace Kernel
diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h
index 7a0266780..253fa4563 100644
--- a/src/core/hle/kernel/physical_memory.h
+++ b/src/core/hle/kernel/physical_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 31a0867d3..773319ad8 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <bit>
@@ -69,9 +68,9 @@ u32 GetFlagBitOffset(CapabilityType type) {
} // Anonymous namespace
-ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
// Allow all cores and priorities.
@@ -82,9 +81,9 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti
return ParseCapabilities(capabilities, num_capabilities, page_table);
}
-ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
+ std::size_t num_capabilities,
+ KPageTable& page_table) {
Clear();
return ParseCapabilities(capabilities, num_capabilities, page_table);
@@ -108,9 +107,8 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() {
can_force_debug = true;
}
-ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
- std::size_t num_capabilities,
- KPageTable& page_table) {
+Result ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table) {
u32 set_flags = 0;
u32 set_svc_bits = 0;
@@ -156,8 +154,8 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
return ResultSuccess;
}
-ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
- u32 flag, KPageTable& page_table) {
+Result ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table) {
const auto type = GetCapabilityType(flag);
if (type == CapabilityType::Unset) {
@@ -225,7 +223,7 @@ void ProcessCapabilities::Clear() {
can_force_debug = false;
}
-ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
+Result ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
if (priority_mask != 0 || core_mask != 0) {
LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
priority_mask, core_mask);
@@ -267,7 +265,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
+Result ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) {
const u32 index = flags >> 29;
const u32 svc_bit = 1U << index;
@@ -291,23 +289,23 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
- KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags,
+ KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapIOFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
+Result ProcessCapabilities::HandleMapRegionFlags(u32 flags, KPageTable& page_table) {
// TODO(Lioncache): Implement once the memory manager can handle this.
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
+Result ProcessCapabilities::HandleInterruptFlags(u32 flags) {
constexpr u32 interrupt_ignore_value = 0x3FF;
const u32 interrupt0 = (flags >> 12) & 0x3FF;
const u32 interrupt1 = (flags >> 22) & 0x3FF;
@@ -334,7 +332,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
+Result ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
const u32 reserved = flags >> 17;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -345,7 +343,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
+Result ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
// Yes, the internal member variable is checked in the actual kernel here.
// This might look odd for options that are only allowed to be initialized
// just once, however the kernel has a separate initialization function for
@@ -365,7 +363,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
+Result ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
const u32 reserved = flags >> 26;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
@@ -376,7 +374,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
return ResultSuccess;
}
-ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
+Result ProcessCapabilities::HandleDebugFlags(u32 flags) {
const u32 reserved = flags >> 19;
if (reserved != 0) {
LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
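Throughout this file, descriptors are classified by GetCapabilityType before being routed to the Handle* functions above. Assuming the Horizon kernel's encoding (the type is the mask of consecutive low 1-bits in the descriptor), the decoding reduces to one bit trick; a worked sketch:

```cpp
#include <cstdint>

// (~v & (v + 1)) isolates the lowest clear bit; subtracting one turns it into
// the mask of trailing 1-bits, which identifies the capability type.
constexpr uint32_t GetCapabilityMask(uint32_t v) {
    return (~v & (v + 1)) - 1;
}

static_assert(GetCapabilityMask(0b0110'0111) == 0b111);     // three trailing ones
static_assert(GetCapabilityMask(0b0001'1111) == 0b1'1111);  // five trailing ones
```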
diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h
index a9b44325b..ff05dc5ff 100644
--- a/src/core/hle/kernel/process_capability.h
+++ b/src/core/hle/kernel/process_capability.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -8,7 +7,7 @@
#include "common/common_types.h"
-union ResultCode;
+union Result;
namespace Kernel {
@@ -87,8 +86,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a userland process.
///
@@ -100,8 +99,8 @@ public:
/// @returns ResultSuccess if this capabilities instance was able to be initialized,
/// otherwise, an error code upon failure.
///
- ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Initializes this process capabilities instance for a process that does not
/// have any metadata to parse.
@@ -186,8 +185,8 @@ private:
///
/// @return ResultSuccess if no errors occur, otherwise an error code.
///
- ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
- KPageTable& page_table);
+ Result ParseCapabilities(const u32* capabilities, std::size_t num_capabilities,
+ KPageTable& page_table);
/// Attempts to parse a capability descriptor that is only represented by a
/// single flag set.
@@ -201,8 +200,8 @@ private:
///
/// @return ResultSuccess if no errors occurred, otherwise an error code.
///
- ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
- KPageTable& page_table);
+ Result ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
+ KPageTable& page_table);
/// Clears the internal state of this process capability instance. Necessary
/// to have a sane starting point due to us allowing running executables without
@@ -220,34 +219,34 @@ private:
void Clear();
/// Handles flags related to the priority and core number capability flags.
- ResultCode HandlePriorityCoreNumFlags(u32 flags);
+ Result HandlePriorityCoreNumFlags(u32 flags);
/// Handles flags related to determining the allowable SVC mask.
- ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
+ Result HandleSyscallFlags(u32& set_svc_bits, u32 flags);
/// Handles flags related to mapping physical memory pages.
- ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
+ Result HandleMapPhysicalFlags(u32 flags, u32 size_flags, KPageTable& page_table);
/// Handles flags related to mapping IO pages.
- ResultCode HandleMapIOFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapIOFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to mapping physical memory regions.
- ResultCode HandleMapRegionFlags(u32 flags, KPageTable& page_table);
+ Result HandleMapRegionFlags(u32 flags, KPageTable& page_table);
/// Handles flags related to the interrupt capability flags.
- ResultCode HandleInterruptFlags(u32 flags);
+ Result HandleInterruptFlags(u32 flags);
/// Handles flags related to the program type.
- ResultCode HandleProgramTypeFlags(u32 flags);
+ Result HandleProgramTypeFlags(u32 flags);
/// Handles flags related to the handle table size.
- ResultCode HandleHandleTableFlags(u32 flags);
+ Result HandleHandleTableFlags(u32 flags);
/// Handles flags related to the kernel version capability flags.
- ResultCode HandleKernelVersionFlags(u32 flags);
+ Result HandleKernelVersionFlags(u32 flags);
/// Handles flags related to debug-specific capabilities.
- ResultCode HandleDebugFlags(u32 flags);
+ Result HandleDebugFlags(u32 flags);
SyscallCapabilities svc_capabilities;
InterruptCapabilities interrupt_capabilities;
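The ResultCode-to-Result rename threads through every signature in this header and in svc.cpp below, where the R_TRY/R_UNLESS macros consume the same type. A minimal sketch, not yuzu's real definitions, of how such a union pairs with early-return macros:

```cpp
#include <cstdint>

// Simplified: the real Result also carries module/description bit fields.
union Result {
    uint32_t raw;
    constexpr bool IsError() const { return raw != 0; }
    constexpr bool IsSuccess() const { return raw == 0; }
};

constexpr Result ResultSuccess{0};

// Propagate a failing Result to the caller.
#define R_TRY(expr)                                                                                \
    do {                                                                                           \
        const Result _rc = (expr);                                                                 \
        if (_rc.IsError()) {                                                                       \
            return _rc;                                                                            \
        }                                                                                          \
    } while (false)

// Return the given Result unless the condition holds.
#define R_UNLESS(cond, rc)                                                                         \
    do {                                                                                           \
        if (!(cond)) {                                                                             \
            return (rc);                                                                           \
        }                                                                                          \
    } while (false)
```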
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 4eb3a5988..d23d76706 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <condition_variable>
#include <functional>
@@ -37,7 +36,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
: service_name{name} {
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back([this, &kernel](std::stop_token stop_token) {
- Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
+ Common::SetCurrentThreadName(std::string{service_name}.c_str());
// Wait for first request before trying to acquire a render context
{
@@ -49,12 +48,9 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
return;
}
+ // Allocate a dummy guest thread for this host thread.
kernel.RegisterHostThread();
- // Ensure the dummy thread allocated for this host thread is closed on exit.
- auto* dummy_thread = kernel.GetCurrentEmuThread();
- SCOPE_EXIT({ dummy_thread->Close(); });
-
while (true) {
std::function<void()> task;
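The worker lambda above receives a std::stop_token because the threads are std::jthreads. The overall loop shape, reduced to a self-contained sketch with illustrative names (not yuzu's exact members):

```cpp
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <stop_token>
#include <thread>

class TaskWorker {
public:
    TaskWorker() : worker{[this](std::stop_token stop) { Loop(stop); }} {}

    void Post(std::function<void()> task) {
        {
            std::lock_guard lk{mutex};
            tasks.push_back(std::move(task));
        }
        cv.notify_one();
    }

private:
    void Loop(std::stop_token stop) {
        std::unique_lock lk{mutex};
        // The stop-token-aware wait returns false once stop is requested and
        // the queue is empty, ending the loop without a manual wakeup.
        while (cv.wait(lk, stop, [this] { return !tasks.empty(); })) {
            auto task = std::move(tasks.front());
            tasks.pop_front();
            lk.unlock();
            task(); // run outside the lock so Post() is never blocked
            lk.lock();
        }
    }

    std::mutex mutex;
    std::condition_variable_any cv;
    std::deque<std::function<void()>> tasks;
    std::jthread worker; // declared last: it joins before the members above die
};
```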
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
index 6a7fd7c56..c5896f2bd 100644
--- a/src/core/hle/kernel/service_thread.h
+++ b/src/core/hle/kernel/service_thread.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index f1c11256e..299a981a8 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -59,7 +58,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {
private:
static Derived* Allocate(KernelCore& kernel) {
- return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel);
+ return kernel.SlabHeap<Derived>().Allocate(kernel);
}
static void Free(KernelCore& kernel, Derived* obj) {
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 9387373c1..27e5a805d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cinttypes>
@@ -16,6 +15,7 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/debugger/debugger.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_code_memory.h"
@@ -58,8 +58,8 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
-ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
+Result MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, VAddr src_addr,
+ u64 size) {
if (!Common::Is4KBAligned(dst_addr)) {
LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
return ResultInvalidAddress;
@@ -135,7 +135,7 @@ enum class ResourceLimitValueType {
} // Anonymous namespace
/// Set the process heap to a given Size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+static Result SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
// Validate size.
@@ -148,9 +148,9 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size
return ResultSuccess;
}
-static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
+static Result SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_size) {
VAddr temp_heap_addr{};
- const ResultCode result{SetHeapSize(system, &temp_heap_addr, heap_size)};
+ const Result result{SetHeapSize(system, &temp_heap_addr, heap_size)};
*heap_addr = static_cast<u32>(temp_heap_addr);
return result;
}
@@ -166,8 +166,8 @@ constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
}
}
-static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
- MemoryPermission perm) {
+static Result SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+ MemoryPermission perm) {
LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
perm);
@@ -188,8 +188,8 @@ static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 s
return page_table.SetMemoryPermission(address, size, perm);
}
-static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
+ u32 attr) {
LOG_DEBUG(Kernel_SVC,
"called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address,
size, mask, attr);
@@ -213,19 +213,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
return page_table.SetMemoryAttribute(address, size, mask, attr);
}
-static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
- u32 attr) {
+static Result SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
+ u32 attr) {
return SetMemoryAttribute(system, address, size, mask, attr);
}
/// Maps a memory range into a different range.
-static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -233,18 +233,18 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return page_table.MapMemory(dst_addr, src_addr, size);
}
-static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return MapMemory(system, dst_addr, src_addr, size);
}
/// Unmaps a region that was previously mapped with svcMapMemory
-static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
+static Result UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) {
LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr,
src_addr, size);
auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
- if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
+ if (const Result result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)};
result.IsError()) {
return result;
}
@@ -252,12 +252,12 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return page_table.UnmapMemory(dst_addr, src_addr, size);
}
-static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
+static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
return UnmapMemory(system, dst_addr, src_addr, size);
}
/// Connect to an OS service given the port name, returns the handle to the port to out
-static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
+static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
auto& memory = system.Memory();
if (!memory.IsValidVirtualAddress(port_name_address)) {
LOG_ERROR(Kernel_SVC,
@@ -307,14 +307,14 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr po
return ResultSuccess;
}
-static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
- u32 port_name_address) {
+static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle,
+ u32 port_name_address) {
return ConnectToNamedPort(system, out_handle, port_name_address);
}
/// Makes a blocking IPC call to an OS service.
-static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
+static Result SendSyncRequest(Core::System& system, Handle handle) {
auto& kernel = system.Kernel();
// Create the wait queue.
@@ -327,7 +327,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
- auto thread = kernel.CurrentScheduler()->GetCurrentThread();
{
KScopedSchedulerLock lock(kernel);
@@ -337,15 +336,15 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
}
- return thread->GetWaitResult();
+ return GetCurrentThread(kernel).GetWaitResult();
}
-static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
+static Result SendSyncRequest32(Core::System& system, Handle handle) {
return SendSyncRequest(system, handle);
}
/// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
+static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
// Get the thread from its handle.
KScopedAutoObject thread =
system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
@@ -356,10 +355,10 @@ static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle t
return ResultSuccess;
}
-static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
- u32* out_thread_id_high, Handle thread_handle) {
+static Result GetThreadId32(Core::System& system, u32* out_thread_id_low, u32* out_thread_id_high,
+ Handle thread_handle) {
u64 out_thread_id{};
- const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
+ const Result result{GetThreadId(system, &out_thread_id, thread_handle)};
*out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
*out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
@@ -368,7 +367,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
}
/// Gets the ID of the specified process or a specified thread's owning process.
-static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
+static Result GetProcessId(Core::System& system, u64* out_process_id, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle);
// Get the object from the handle table.
@@ -399,8 +398,8 @@ static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle
return ResultSuccess;
}
-static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
- u32* out_process_id_high, Handle handle) {
+static Result GetProcessId32(Core::System& system, u32* out_process_id_low,
+ u32* out_process_id_high, Handle handle) {
u64 out_process_id{};
const auto result = GetProcessId(system, &out_process_id, handle);
*out_process_id_low = static_cast<u32>(out_process_id);
@@ -409,8 +408,8 @@ static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low,
}
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
- s32 num_handles, s64 nano_seconds) {
+static Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
+ s32 num_handles, s64 nano_seconds) {
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
handles_address, num_handles, nano_seconds);
@@ -445,14 +444,14 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha
nano_seconds);
}
-static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
- s32 num_handles, u32 timeout_high, s32* index) {
+static Result WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
+ s32 num_handles, u32 timeout_high, s32* index) {
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds);
}
/// Resumes a thread waiting on WaitSynchronization
-static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
+static Result CancelSynchronization(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle);
// Get the thread from its handle.
@@ -465,13 +464,12 @@ static ResultCode CancelSynchronization(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CancelSynchronization32(Core::System& system, Handle handle) {
+static Result CancelSynchronization32(Core::System& system, Handle handle) {
return CancelSynchronization(system, handle);
}
/// Attempts to lock a mutex
-static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
- u32 tag) {
+static Result ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, u32 tag) {
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
thread_handle, address, tag);
@@ -489,13 +487,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAdd
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
}
-static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
- u32 tag) {
+static Result ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, u32 tag) {
return ArbitrateLock(system, thread_handle, address, tag);
}
/// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+static Result ArbitrateUnlock(Core::System& system, VAddr address) {
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
// Validate the input address.
@@ -513,7 +510,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
return system.Kernel().CurrentProcess()->SignalToAddress(address);
}
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+static Result ArbitrateUnlock32(Core::System& system, u32 address) {
return ArbitrateUnlock(system, address);
}
@@ -624,10 +621,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
handle_debug_buffer(info1, info2);
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const auto thread_processor_id = current_thread->GetActiveCore();
system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
}
+
+ if (system.DebuggerEnabled()) {
+ auto* thread = system.Kernel().GetCurrentEmuThread();
+ system.GetDebugger().NotifyThreadStopped(thread);
+ thread->RequestSuspend(Kernel::SuspendType::Debug);
+ }
}
static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
@@ -645,9 +648,13 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
LOG_DEBUG(Debug_Emulated, "{}", str);
}
+static void OutputDebugString32(Core::System& system, u32 address, u32 len) {
+ OutputDebugString(system, address, len);
+}
+
/// Gets system/memory information for the current process
-static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
- u64 info_sub_id) {
+static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle,
+ u64 info_sub_id) {
LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
info_sub_id, handle);
@@ -682,6 +689,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
// 6.0.0+
TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
TotalPhysicalMemoryUsedWithoutSystemResource = 22,
+
+ // Homebrew only
+ MesosphereCurrentProcess = 65001,
};
const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -874,10 +884,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
const auto& core_timing = system.CoreTiming();
const auto& scheduler = *system.Kernel().CurrentScheduler();
- const auto* const current_thread = scheduler.GetCurrentThread();
+ const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
- const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
+ const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetCpuTime();
@@ -896,7 +906,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
// Verify the requested core is valid.
const bool core_valid =
- (info_sub_id == static_cast<u64>(-1ULL)) ||
+ (info_sub_id == 0xFFFFFFFFFFFFFFFF) ||
(info_sub_id == static_cast<u64>(system.Kernel().CurrentPhysicalCoreIndex()));
R_UNLESS(core_valid, ResultInvalidCombination);
@@ -904,18 +914,39 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
*result = system.Kernel().CurrentScheduler()->GetIdleThread()->GetCpuTime();
return ResultSuccess;
}
+ case GetInfoType::MesosphereCurrentProcess: {
+ // Verify the input handle is invalid.
+ R_UNLESS(handle == InvalidHandle, ResultInvalidHandle);
+
+ // Verify the sub-type is valid.
+ R_UNLESS(info_sub_id == 0, ResultInvalidCombination);
+
+ // Get the handle table.
+ KProcess* current_process = system.Kernel().CurrentProcess();
+ KHandleTable& handle_table = current_process->GetHandleTable();
+
+ // Get a new handle for the current process.
+ Handle tmp;
+ R_TRY(handle_table.Add(&tmp, current_process));
+
+ // Set the output.
+ *result = tmp;
+
+ // We succeeded.
+ return ResultSuccess;
+ }
default:
LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
return ResultInvalidEnumValue;
}
}
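The new MesosphereCurrentProcess case mirrors the homebrew-only info type from the Atmosphère kernel: the caller passes an invalid handle and receives a fresh handle to its own process. A hypothetical caller's view, with a libnx-style svcGetInfo wrapper signature that is an assumption, not part of this diff:

```cpp
#include <cstdint>

// Assumed syscall wrapper, following libnx conventions.
extern "C" uint32_t svcGetInfo(uint64_t* out, uint32_t id, uint32_t handle, uint64_t sub_id);

constexpr uint32_t InfoType_MesosphereCurrentProcess = 65001;
constexpr uint32_t INVALID_HANDLE = 0;

uint32_t GetOwnProcessHandle(uint32_t* out_handle) {
    uint64_t value{};
    // Handle must be invalid and sub_id must be 0, per the checks above.
    const uint32_t rc = svcGetInfo(&value, InfoType_MesosphereCurrentProcess, INVALID_HANDLE, 0);
    if (rc == 0) {
        *out_handle = static_cast<uint32_t>(value);
    }
    return rc;
}
```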
-static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
- u32 info_id, u32 handle, u32 sub_id_high) {
+static Result GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
+ u32 info_id, u32 handle, u32 sub_id_high) {
const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
u64 res_value{};
- const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
+ const Result result{GetInfo(system, &res_value, info_id, handle, sub_id)};
*result_high = static_cast<u32>(res_value >> 32);
*result_low = static_cast<u32>(res_value & std::numeric_limits<u32>::max());
@@ -923,7 +954,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h
}
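GetInfo32 and the other *32 wrappers in this file all perform the same ABI adaptation: a 64-bit value is split across two 32-bit outputs, and 64-bit inputs are rebuilt from low/high halves. The arithmetic, isolated:

```cpp
#include <cstdint>

constexpr uint32_t High32(uint64_t v) {
    return static_cast<uint32_t>(v >> 32);
}
constexpr uint32_t Low32(uint64_t v) {
    return static_cast<uint32_t>(v & 0xFFFF'FFFFULL);
}
constexpr uint64_t Join64(uint32_t low, uint32_t high) {
    return (static_cast<uint64_t>(high) << 32) | low;
}

static_assert(Join64(Low32(0x1234'5678'9ABC'DEF0ULL),
                     High32(0x1234'5678'9ABC'DEF0ULL)) == 0x1234'5678'9ABC'DEF0ULL);
```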
/// Maps memory at a desired address
-static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -971,12 +1002,12 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
return page_table.MapPhysicalMemory(addr, size);
}
-static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return MapPhysicalMemory(system, addr, size);
}
/// Unmaps memory previously mapped via MapPhysicalMemory
-static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+static Result UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
if (!Common::Is4KBAligned(addr)) {
@@ -1024,13 +1055,13 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
return page_table.UnmapPhysicalMemory(addr, size);
}
-static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
+static Result UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
return UnmapPhysicalMemory(system, addr, size);
}
/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
- ThreadActivity thread_activity) {
+static Result SetThreadActivity(Core::System& system, Handle thread_handle,
+ ThreadActivity thread_activity) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
thread_activity);
@@ -1055,13 +1086,13 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
- Svc::ThreadActivity thread_activity) {
+static Result SetThreadActivity32(Core::System& system, Handle thread_handle,
+ Svc::ThreadActivity thread_activity) {
return SetThreadActivity(system, thread_handle, thread_activity);
}
/// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+static Result GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
thread_handle);
@@ -1093,7 +1124,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
if (thread->GetRawState() != ThreadState::Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
- if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) {
+ if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
@@ -1118,12 +1149,12 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand
return ResultSuccess;
}
-static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+static Result GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
return GetThreadContext(system, out_context, thread_handle);
}
/// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
LOG_TRACE(Kernel_SVC, "called");
// Get the thread from its handle.
@@ -1136,12 +1167,12 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han
return ResultSuccess;
}
-static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+static Result GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
return GetThreadPriority(system, out_priority, handle);
}
/// Sets the priority for the specified thread
-static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) {
// Get the current process.
KProcess& process = *system.Kernel().CurrentProcess();
@@ -1159,7 +1190,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
+static Result SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) {
return SetThreadPriority(system, thread_handle, priority);
}
@@ -1219,8 +1250,8 @@ constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission p
} // Anonymous namespace
-static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, u64 size,
+ Svc::MemoryPermission map_perm) {
LOG_TRACE(Kernel_SVC,
"called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
shmem_handle, address, size, map_perm);
@@ -1260,13 +1291,13 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAd
return ResultSuccess;
}
-static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size, Svc::MemoryPermission map_perm) {
+static Result MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, u32 size,
+ Svc::MemoryPermission map_perm) {
return MapSharedMemory(system, shmem_handle, address, size, map_perm);
}
-static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
- u64 size) {
+static Result UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
+ u64 size) {
// Validate the address/size.
R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
@@ -1293,13 +1324,13 @@ static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, V
return ResultSuccess;
}
-static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
- u32 size) {
+static Result UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address,
+ u32 size) {
return UnmapSharedMemory(system, shmem_handle, address, size);
}
-static ResultCode SetProcessMemoryPermission(Core::System& system, Handle process_handle,
- VAddr address, u64 size, Svc::MemoryPermission perm) {
+static Result SetProcessMemoryPermission(Core::System& system, Handle process_handle, VAddr address,
+ u64 size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
process_handle, address, size, perm);
@@ -1328,8 +1359,8 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces
return page_table.SetProcessMemoryPermission(address, size, perm);
}
-static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1358,8 +1389,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
ResultInvalidMemoryRegion);
// Create a new page group.
- KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
- KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+ KPageGroup pg;
+ R_TRY(src_pt.MakeAndOpenPageGroup(
+ std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+ KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+ KMemoryAttribute::All, KMemoryAttribute::None));
// Map the group.
R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
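The replacement matters because the old code described a single block queried at the destination, whereas MakeAndOpenPageGroup builds the group from the source mapping, which may span several physically discontiguous extents. A conceptual sketch of the data structure, with illustrative types and fields only:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

struct PageExtent {
    uint64_t base_address;
    std::size_t num_pages;
};

// Conceptual stand-in for KPageGroup: an ordered list of extents that together
// describe one logical mapping.
class PageGroup {
public:
    void AddBlock(uint64_t base_address, std::size_t num_pages) {
        extents.push_back({base_address, num_pages});
    }
    std::size_t GetNumPages() const {
        std::size_t total = 0;
        for (const auto& e : extents) {
            total += e.num_pages;
        }
        return total;
    }

private:
    std::vector<PageExtent> extents;
};
```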
@@ -1368,8 +1402,8 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
return ResultSuccess;
}
-static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
- VAddr src_address, u64 size) {
+static Result UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
+ VAddr src_address, u64 size) {
LOG_TRACE(Kernel_SVC,
"called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
dst_address, process_handle, src_address, size);
@@ -1403,9 +1437,9 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
return ResultSuccess;
}
-static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
- LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
- static_cast<void*>(out), address, size);
+static Result CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
+ LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
// Get kernel instance.
auto& kernel = system.Kernel();
@@ -1438,8 +1472,12 @@ static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr addr
return ResultSuccess;
}
-static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
- VAddr address, size_t size, Svc::MemoryPermission perm) {
+static Result CreateCodeMemory32(Core::System& system, Handle* out, u32 address, u32 size) {
+ return CreateCodeMemory(system, out, address, size);
+}
+
+static Result ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
+ VAddr address, size_t size, Svc::MemoryPermission perm) {
LOG_TRACE(Kernel_SVC,
"called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
@@ -1517,9 +1555,13 @@ static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_han
return ResultSuccess;
}
-static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, Handle process_handle,
- VAddr address) {
+static Result ControlCodeMemory32(Core::System& system, Handle code_memory_handle, u32 operation,
+ u64 address, u64 size, Svc::MemoryPermission perm) {
+ return ControlCodeMemory(system, code_memory_handle, operation, address, size, perm);
+}
+
+static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address,
+ VAddr page_info_address, Handle process_handle, VAddr address) {
LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address);
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
@@ -1547,8 +1589,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
return ResultSuccess;
}
-static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
- VAddr page_info_address, VAddr query_address) {
+static Result QueryMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address,
+ VAddr query_address) {
LOG_TRACE(Kernel_SVC,
"called, memory_info_address=0x{:016X}, page_info_address=0x{:016X}, "
"query_address=0x{:016X}",
@@ -1558,13 +1600,13 @@ static ResultCode QueryMemory(Core::System& system, VAddr memory_info_address,
query_address);
}
-static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
- u32 page_info_address, u32 query_address) {
+static Result QueryMemory32(Core::System& system, u32 memory_info_address, u32 page_info_address,
+ u32 query_address) {
return QueryMemory(system, memory_info_address, page_info_address, query_address);
}
-static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
- u64 src_address, u64 size) {
+static Result MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
"src_address=0x{:016X}, size=0x{:016X}",
@@ -1631,8 +1673,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand
return page_table.MapCodeMemory(dst_address, src_address, size);
}
-static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
- u64 dst_address, u64 src_address, u64 size) {
+static Result UnmapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
+ u64 src_address, u64 size) {
LOG_DEBUG(Kernel_SVC,
"called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
"size=0x{:016X}",
@@ -1650,7 +1692,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidAddress;
}
- if (size == 0 || Common::Is4KBAligned(size)) {
+ if (size == 0 || !Common::Is4KBAligned(size)) {
LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
return ResultInvalidSize;
}
@@ -1696,17 +1738,19 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
return ResultInvalidMemoryRegion;
}
- return page_table.UnmapCodeMemory(dst_address, src_address, size);
+ return page_table.UnmapCodeMemory(dst_address, src_address, size,
+ KPageTable::ICacheInvalidationStrategy::InvalidateAll);
}
/// Exits the current process
static void ExitProcess(Core::System& system) {
auto* current_process = system.Kernel().CurrentProcess();
- UNIMPLEMENTED();
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
"Process has already exited");
+
+ system.Exit();
}
static void ExitProcess32(Core::System& system) {
@@ -1722,8 +1766,8 @@ constexpr bool IsValidVirtualCoreId(int32_t core_id) {
} // Anonymous namespace
/// Creates a new thread
-static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
- VAddr stack_bottom, u32 priority, s32 core_id) {
+static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
+ VAddr stack_bottom, u32 priority, s32 core_id) {
LOG_DEBUG(Kernel_SVC,
"called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
"priority=0x{:08X}, core_id=0x{:08X}",
@@ -1794,13 +1838,13 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
return ResultSuccess;
}
-static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
- u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
+static Result CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
+ u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
}
/// Starts the thread for the provided handle
-static ResultCode StartThread(Core::System& system, Handle thread_handle) {
+static Result StartThread(Core::System& system, Handle thread_handle) {
LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -1818,7 +1862,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
return ResultSuccess;
}
-static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
+static Result StartThread32(Core::System& system, Handle thread_handle) {
return StartThread(system, thread_handle);
}
@@ -1826,7 +1870,7 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
static void ExitThread(Core::System& system) {
LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
- auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+ auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
system.GlobalSchedulerContext().RemoveThread(current_thread);
current_thread->Exit();
system.Kernel().UnregisterInUseObject(current_thread);
@@ -1859,7 +1903,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
KScheduler::YieldToAnyThread(kernel);
} else {
// Nintendo does nothing at all if an otherwise invalid value is passed.
- UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
+ ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
}
}
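// For reference, the recognized special values are presumably the Horizon
// yield constants: 0 (yield without core migration), -1 (yield with core
// migration) and -2 (yield to any thread, taken in the branch above); other
// otherwise-invalid values fall through to the assert.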
@@ -1869,8 +1913,8 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
}
/// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
- u32 tag, s64 timeout_ns) {
+static Result WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, u32 tag,
+ s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
cv_key, tag, timeout_ns);
@@ -1905,8 +1949,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address,
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
}
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
- u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+ u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
}
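// Worked example of the 64-bit reassembly above: timeout_ns_low=0xFFFFFFFF
// with timeout_ns_high=0xFFFFFFFF yields s64{-1}, conventionally an infinite
// wait, while timeout_ns_high=0x00000001 gives 0x1'00000000 ns (~4.3 s).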
@@ -1951,8 +1995,8 @@ constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
} // namespace
// Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
- s32 value, s64 timeout_ns) {
+static Result WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+ s32 value, s64 timeout_ns) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
address, arb_type, value, timeout_ns);
@@ -1989,15 +2033,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::Arbit
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
}
-static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
- s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+static Result WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+ s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
return WaitForAddress(system, address, arb_type, value, timeout);
}
// Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
address, signal_type, value, count);
@@ -2038,8 +2082,8 @@ static void SynchronizePreemptionState(Core::System& system) {
}
}
-static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
- s32 value, s32 count) {
+static Result SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+ s32 value, s32 count) {
return SignalToAddress(system, address, signal_type, value, count);
}
@@ -2077,7 +2121,7 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high)
}
/// Close a handle
-static ResultCode CloseHandle(Core::System& system, Handle handle) {
+static Result CloseHandle(Core::System& system, Handle handle) {
LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle);
// Remove the handle.
@@ -2087,12 +2131,12 @@ static ResultCode CloseHandle(Core::System& system, Handle handle) {
return ResultSuccess;
}
-static ResultCode CloseHandle32(Core::System& system, Handle handle) {
+static Result CloseHandle32(Core::System& system, Handle handle) {
return CloseHandle(system, handle);
}
/// Clears the signaled state of an event or process.
-static ResultCode ResetSignal(Core::System& system, Handle handle) {
+static Result ResetSignal(Core::System& system, Handle handle) {
LOG_DEBUG(Kernel_SVC, "called handle 0x{:08X}", handle);
// Get the current handle table.
@@ -2119,7 +2163,7 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) {
return ResultInvalidHandle;
}
-static ResultCode ResetSignal32(Core::System& system, Handle handle) {
+static Result ResetSignal32(Core::System& system, Handle handle) {
return ResetSignal(system, handle);
}
@@ -2139,8 +2183,8 @@ constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
} // Anonymous namespace
/// Creates a TransferMemory object
-static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
+ MemoryPermission map_perm) {
auto& kernel = system.Kernel();
// Validate the size.
@@ -2186,13 +2230,13 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr
return ResultSuccess;
}
-static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
- MemoryPermission map_perm) {
+static Result CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size,
+ MemoryPermission map_perm) {
return CreateTransferMemory(system, out, address, size, map_perm);
}
-static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
- u64* out_affinity_mask) {
+static Result GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u64* out_affinity_mask) {
LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
// Get the thread from its handle.
@@ -2206,8 +2250,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
- u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
+static Result GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
+ u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
u64 out_affinity_mask{};
const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
*out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
@@ -2215,8 +2259,8 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle
return result;
}
-static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
- u64 affinity_mask) {
+static Result SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
+ u64 affinity_mask) {
// Determine the core id/affinity mask.
if (core_id == IdealCoreUseProcessValue) {
core_id = system.Kernel().CurrentProcess()->GetIdealCoreId();
@@ -2247,13 +2291,13 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
return ResultSuccess;
}
-static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
- u32 affinity_mask_low, u32 affinity_mask_high) {
+static Result SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
+ u32 affinity_mask_low, u32 affinity_mask_high) {
const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
}
-static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
+static Result SignalEvent(Core::System& system, Handle event_handle) {
LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2266,11 +2310,11 @@ static ResultCode SignalEvent(Core::System& system, Handle event_handle) {
return writable_event->Signal();
}
-static ResultCode SignalEvent32(Core::System& system, Handle event_handle) {
+static Result SignalEvent32(Core::System& system, Handle event_handle) {
return SignalEvent(system, event_handle);
}
-static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
+static Result ClearEvent(Core::System& system, Handle event_handle) {
LOG_TRACE(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle);
// Get the current handle table.
@@ -2297,11 +2341,11 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) {
return ResultInvalidHandle;
}
-static ResultCode ClearEvent32(Core::System& system, Handle event_handle) {
+static Result ClearEvent32(Core::System& system, Handle event_handle) {
return ClearEvent(system, event_handle);
}
-static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
LOG_DEBUG(Kernel_SVC, "called");
// Get the kernel reference and handle table.
@@ -2318,7 +2362,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
R_UNLESS(event != nullptr, ResultOutOfResource);
// Initialize the event.
- event->Initialize("CreateEvent");
+ event->Initialize("CreateEvent", kernel.CurrentProcess());
// Commit the thread reservation.
event_reservation.Commit();
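// The extra owner argument to Initialize presumably ties the event to the
// creating process so the reservation committed above is released against the
// correct resource limit when the event is destroyed.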
@@ -2346,11 +2390,11 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o
return ResultSuccess;
}
-static ResultCode CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
+static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out_read) {
return CreateEvent(system, out_write, out_read);
}
-static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
+static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) {
LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type);
// This function currently only allows retrieving a process' status.
@@ -2376,7 +2420,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_
return ResultSuccess;
}
-static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) {
+static Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
LOG_DEBUG(Kernel_SVC, "called");
// Create a new resource limit.
@@ -2399,9 +2443,8 @@ static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle)
return ResultSuccess;
}
-static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2420,9 +2463,8 @@ static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limi
return ResultSuccess;
}
-static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
- Handle resource_limit_handle,
- LimitableResource which) {
+static Result GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value,
+ Handle resource_limit_handle, LimitableResource which) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle,
which);
@@ -2441,8 +2483,8 @@ static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_cu
return ResultSuccess;
}
-static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
- LimitableResource which, u64 limit_value) {
+static Result SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle,
+ LimitableResource which, u64 limit_value) {
LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}",
resource_limit_handle, which, limit_value);
@@ -2461,8 +2503,8 @@ static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resour
return ResultSuccess;
}
-static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
- VAddr out_process_ids, u32 out_process_ids_size) {
+static Result GetProcessList(Core::System& system, u32* out_num_processes, VAddr out_process_ids,
+ u32 out_process_ids_size) {
LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
out_process_ids, out_process_ids_size);
@@ -2498,8 +2540,8 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes,
return ResultSuccess;
}
-static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
- u32 out_thread_ids_size, Handle debug_handle) {
+static Result GetThreadList(Core::System& system, u32* out_num_threads, VAddr out_thread_ids,
+ u32 out_thread_ids_size, Handle debug_handle) {
// TODO: Handle this case when debug events are supported.
UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
@@ -2513,7 +2555,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultOutOfRange;
}
- const auto* const current_process = system.Kernel().CurrentProcess();
+ auto* const current_process = system.Kernel().CurrentProcess();
const auto total_copy_size = out_thread_ids_size * sizeof(u64);
if (out_thread_ids_size > 0 &&
@@ -2538,9 +2580,9 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultSuccess;
}
-static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
- [[maybe_unused]] Handle handle,
- [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
+static Result FlushProcessDataCache32([[maybe_unused]] Core::System& system,
+ [[maybe_unused]] Handle handle, [[maybe_unused]] u32 address,
+ [[maybe_unused]] u32 size) {
// Note(Blinkhawk): For data cache emulation purposes this is mostly a no-op,
// since all emulation happens at the same cache level on the host
// architecture, so the data cache does not need flushing.
@@ -2559,9 +2601,9 @@ struct FunctionDef {
} // namespace
static const FunctionDef SVC_Table_32[] = {
- {0x00, nullptr, "Unknown"},
+ {0x00, nullptr, "Unknown0"},
{0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"},
- {0x02, nullptr, "Unknown"},
+ {0x02, nullptr, "SetMemoryPermission32"},
{0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"},
{0x04, SvcWrap32<MapMemory32>, "MapMemory32"},
{0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"},
@@ -2591,97 +2633,97 @@ static const FunctionDef SVC_Table_32[] = {
{0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"},
{0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"},
{0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"},
- {0x20, nullptr, "Unknown"},
+ {0x20, nullptr, "SendSyncRequestLight32"},
{0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"},
{0x22, nullptr, "SendSyncRequestWithUserBuffer32"},
- {0x23, nullptr, "Unknown"},
+ {0x23, nullptr, "SendAsyncRequestWithUserBuffer32"},
{0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"},
{0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"},
{0x26, SvcWrap32<Break32>, "Break32"},
- {0x27, nullptr, "OutputDebugString32"},
- {0x28, nullptr, "Unknown"},
+ {0x27, SvcWrap32<OutputDebugString32>, "OutputDebugString32"},
+ {0x28, nullptr, "ReturnFromException32"},
{0x29, SvcWrap32<GetInfo32>, "GetInfo32"},
- {0x2a, nullptr, "Unknown"},
- {0x2b, nullptr, "Unknown"},
+ {0x2a, nullptr, "FlushEntireDataCache32"},
+ {0x2b, nullptr, "FlushDataCache32"},
{0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"},
{0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"},
- {0x2e, nullptr, "Unknown"},
- {0x2f, nullptr, "Unknown"},
- {0x30, nullptr, "Unknown"},
- {0x31, nullptr, "Unknown"},
+ {0x2e, nullptr, "GetDebugFutureThreadInfo32"},
+ {0x2f, nullptr, "GetLastThreadInfo32"},
+ {0x30, nullptr, "GetResourceLimitLimitValue32"},
+ {0x31, nullptr, "GetResourceLimitCurrentValue32"},
{0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"},
{0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
{0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
{0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
{0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"},
- {0x37, nullptr, "Unknown"},
- {0x38, nullptr, "Unknown"},
- {0x39, nullptr, "Unknown"},
- {0x3a, nullptr, "Unknown"},
- {0x3b, nullptr, "Unknown"},
- {0x3c, nullptr, "Unknown"},
- {0x3d, nullptr, "Unknown"},
- {0x3e, nullptr, "Unknown"},
- {0x3f, nullptr, "Unknown"},
+ {0x37, nullptr, "GetResourceLimitPeakValue32"},
+ {0x38, nullptr, "Unknown38"},
+ {0x39, nullptr, "CreateIoPool32"},
+ {0x3a, nullptr, "CreateIoRegion32"},
+ {0x3b, nullptr, "Unknown3b"},
+ {0x3c, nullptr, "KernelDebug32"},
+ {0x3d, nullptr, "ChangeKernelTraceState32"},
+ {0x3e, nullptr, "Unknown3e"},
+ {0x3f, nullptr, "Unknown3f"},
{0x40, nullptr, "CreateSession32"},
{0x41, nullptr, "AcceptSession32"},
- {0x42, nullptr, "Unknown"},
+ {0x42, nullptr, "ReplyAndReceiveLight32"},
{0x43, nullptr, "ReplyAndReceive32"},
- {0x44, nullptr, "Unknown"},
+ {0x44, nullptr, "ReplyAndReceiveWithUserBuffer32"},
{0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"},
- {0x46, nullptr, "Unknown"},
- {0x47, nullptr, "Unknown"},
- {0x48, nullptr, "Unknown"},
- {0x49, nullptr, "Unknown"},
- {0x4a, nullptr, "Unknown"},
- {0x4b, nullptr, "Unknown"},
- {0x4c, nullptr, "Unknown"},
- {0x4d, nullptr, "Unknown"},
- {0x4e, nullptr, "Unknown"},
- {0x4f, nullptr, "Unknown"},
- {0x50, nullptr, "Unknown"},
- {0x51, nullptr, "Unknown"},
- {0x52, nullptr, "Unknown"},
- {0x53, nullptr, "Unknown"},
- {0x54, nullptr, "Unknown"},
- {0x55, nullptr, "Unknown"},
- {0x56, nullptr, "Unknown"},
- {0x57, nullptr, "Unknown"},
- {0x58, nullptr, "Unknown"},
- {0x59, nullptr, "Unknown"},
- {0x5a, nullptr, "Unknown"},
- {0x5b, nullptr, "Unknown"},
- {0x5c, nullptr, "Unknown"},
- {0x5d, nullptr, "Unknown"},
- {0x5e, nullptr, "Unknown"},
+ {0x46, nullptr, "MapIoRegion32"},
+ {0x47, nullptr, "UnmapIoRegion32"},
+ {0x48, nullptr, "MapPhysicalMemoryUnsafe32"},
+ {0x49, nullptr, "UnmapPhysicalMemoryUnsafe32"},
+ {0x4a, nullptr, "SetUnsafeLimit32"},
+ {0x4b, SvcWrap32<CreateCodeMemory32>, "CreateCodeMemory32"},
+ {0x4c, SvcWrap32<ControlCodeMemory32>, "ControlCodeMemory32"},
+ {0x4d, nullptr, "SleepSystem32"},
+ {0x4e, nullptr, "ReadWriteRegister32"},
+ {0x4f, nullptr, "SetProcessActivity32"},
+ {0x50, nullptr, "CreateSharedMemory32"},
+ {0x51, nullptr, "MapTransferMemory32"},
+ {0x52, nullptr, "UnmapTransferMemory32"},
+ {0x53, nullptr, "CreateInterruptEvent32"},
+ {0x54, nullptr, "QueryPhysicalAddress32"},
+ {0x55, nullptr, "QueryIoMapping32"},
+ {0x56, nullptr, "CreateDeviceAddressSpace32"},
+ {0x57, nullptr, "AttachDeviceAddressSpace32"},
+ {0x58, nullptr, "DetachDeviceAddressSpace32"},
+ {0x59, nullptr, "MapDeviceAddressSpaceByForce32"},
+ {0x5a, nullptr, "MapDeviceAddressSpaceAligned32"},
+ {0x5b, nullptr, "MapDeviceAddressSpace32"},
+ {0x5c, nullptr, "UnmapDeviceAddressSpace32"},
+ {0x5d, nullptr, "InvalidateProcessDataCache32"},
+ {0x5e, nullptr, "StoreProcessDataCache32"},
{0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"},
- {0x60, nullptr, "Unknown"},
- {0x61, nullptr, "Unknown"},
- {0x62, nullptr, "Unknown"},
- {0x63, nullptr, "Unknown"},
- {0x64, nullptr, "Unknown"},
+ {0x60, nullptr, "StoreProcessDataCache32"},
+ {0x61, nullptr, "BreakDebugProcess32"},
+ {0x62, nullptr, "TerminateDebugProcess32"},
+ {0x63, nullptr, "GetDebugEvent32"},
+ {0x64, nullptr, "ContinueDebugEvent32"},
{0x65, nullptr, "GetProcessList32"},
- {0x66, nullptr, "Unknown"},
- {0x67, nullptr, "Unknown"},
- {0x68, nullptr, "Unknown"},
- {0x69, nullptr, "Unknown"},
- {0x6A, nullptr, "Unknown"},
- {0x6B, nullptr, "Unknown"},
- {0x6C, nullptr, "Unknown"},
- {0x6D, nullptr, "Unknown"},
- {0x6E, nullptr, "Unknown"},
+ {0x66, nullptr, "GetThreadList"},
+ {0x67, nullptr, "GetDebugThreadContext32"},
+ {0x68, nullptr, "SetDebugThreadContext32"},
+ {0x69, nullptr, "QueryDebugProcessMemory32"},
+ {0x6A, nullptr, "ReadDebugProcessMemory32"},
+ {0x6B, nullptr, "WriteDebugProcessMemory32"},
+ {0x6C, nullptr, "SetHardwareBreakPoint32"},
+ {0x6D, nullptr, "GetDebugThreadParam32"},
+ {0x6E, nullptr, "Unknown6E"},
{0x6f, nullptr, "GetSystemInfo32"},
{0x70, nullptr, "CreatePort32"},
{0x71, nullptr, "ManageNamedPort32"},
{0x72, nullptr, "ConnectToPort32"},
{0x73, nullptr, "SetProcessMemoryPermission32"},
- {0x74, nullptr, "Unknown"},
- {0x75, nullptr, "Unknown"},
- {0x76, nullptr, "Unknown"},
+ {0x74, nullptr, "MapProcessMemory32"},
+ {0x75, nullptr, "UnmapProcessMemory32"},
+ {0x76, nullptr, "QueryProcessMemory32"},
{0x77, nullptr, "MapProcessCodeMemory32"},
{0x78, nullptr, "UnmapProcessCodeMemory32"},
- {0x79, nullptr, "Unknown"},
- {0x7A, nullptr, "Unknown"},
+ {0x79, nullptr, "CreateProcess32"},
+ {0x7A, nullptr, "StartProcess32"},
{0x7B, nullptr, "TerminateProcess32"},
{0x7C, nullptr, "GetProcessInfo32"},
{0x7D, nullptr, "CreateResourceLimit32"},
@@ -2754,7 +2796,7 @@ static const FunctionDef SVC_Table_32[] = {
};
static const FunctionDef SVC_Table_64[] = {
- {0x00, nullptr, "Unknown"},
+ {0x00, nullptr, "Unknown0"},
{0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
{0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
{0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
@@ -2809,23 +2851,23 @@ static const FunctionDef SVC_Table_64[] = {
{0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
{0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
{0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
- {0x37, nullptr, "Unknown"},
- {0x38, nullptr, "Unknown"},
- {0x39, nullptr, "Unknown"},
- {0x3A, nullptr, "Unknown"},
- {0x3B, nullptr, "Unknown"},
+ {0x37, nullptr, "GetResourceLimitPeakValue"},
+ {0x38, nullptr, "Unknown38"},
+ {0x39, nullptr, "CreateIoPool"},
+ {0x3A, nullptr, "CreateIoRegion"},
+ {0x3B, nullptr, "Unknown3B"},
{0x3C, SvcWrap64<KernelDebug>, "KernelDebug"},
{0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"},
- {0x3E, nullptr, "Unknown"},
- {0x3F, nullptr, "Unknown"},
+ {0x3E, nullptr, "Unknown3e"},
+ {0x3F, nullptr, "Unknown3f"},
{0x40, nullptr, "CreateSession"},
{0x41, nullptr, "AcceptSession"},
{0x42, nullptr, "ReplyAndReceiveLight"},
{0x43, nullptr, "ReplyAndReceive"},
{0x44, nullptr, "ReplyAndReceiveWithUserBuffer"},
{0x45, SvcWrap64<CreateEvent>, "CreateEvent"},
- {0x46, nullptr, "Unknown"},
- {0x47, nullptr, "Unknown"},
+ {0x46, nullptr, "MapIoRegion"},
+ {0x47, nullptr, "UnmapIoRegion"},
{0x48, nullptr, "MapPhysicalMemoryUnsafe"},
{0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
{0x4A, nullptr, "SetUnsafeLimit"},
@@ -2864,7 +2906,7 @@ static const FunctionDef SVC_Table_64[] = {
{0x6B, nullptr, "WriteDebugProcessMemory"},
{0x6C, nullptr, "SetHardwareBreakPoint"},
{0x6D, nullptr, "GetDebugThreadParam"},
- {0x6E, nullptr, "Unknown"},
+ {0x6E, nullptr, "Unknown6E"},
{0x6F, nullptr, "GetSystemInfo"},
{0x70, nullptr, "CreatePort"},
{0x71, nullptr, "ManageNamedPort"},
@@ -2965,11 +3007,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) {
}
void Call(Core::System& system, u32 immediate) {
- system.ExitDynarmicProfile();
auto& kernel = system.Kernel();
kernel.EnterSVCProfile();
- auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
+ auto* thread = GetCurrentThreadPointer(kernel);
thread->SetIsCallingSvc();
const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
@@ -2985,13 +3026,6 @@ void Call(Core::System& system, u32 immediate) {
}
kernel.ExitSVCProfile();
-
- if (!thread->IsCallingSvc()) {
- auto* host_context = thread->GetHostContext().get();
- host_context->Rewind();
- }
-
- system.EnterDynarmicProfile();
}
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h
index 46e64277e..13f061b83 100644
--- a/src/core/hle/kernel/svc.h
+++ b/src/core/hle/kernel/svc.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 25de6e437..95750c3eb 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index 53a940723..f27cade33 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -10,34 +9,34 @@ namespace Kernel {
// Confirmed Switch kernel error codes
-constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7};
-constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14};
-constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
-constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
-constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101};
-constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
-constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103};
-constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104};
-constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105};
-constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
-constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
-constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
-constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
-constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
-constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
-constexpr ResultCode ResultInvalidPointer{ErrorModule::Kernel, 115};
-constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
-constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
-constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
-constexpr ResultCode ResultOutOfRange{ErrorModule::Kernel, 119};
-constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
-constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121};
-constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
-constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123};
-constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
-constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126};
-constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131};
-constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132};
-constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519};
+constexpr Result ResultOutOfSessions{ErrorModule::Kernel, 7};
+constexpr Result ResultInvalidArgument{ErrorModule::Kernel, 14};
+constexpr Result ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
+constexpr Result ResultTerminationRequested{ErrorModule::Kernel, 59};
+constexpr Result ResultInvalidSize{ErrorModule::Kernel, 101};
+constexpr Result ResultInvalidAddress{ErrorModule::Kernel, 102};
+constexpr Result ResultOutOfResource{ErrorModule::Kernel, 103};
+constexpr Result ResultOutOfMemory{ErrorModule::Kernel, 104};
+constexpr Result ResultOutOfHandles{ErrorModule::Kernel, 105};
+constexpr Result ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
+constexpr Result ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108};
+constexpr Result ResultInvalidMemoryRegion{ErrorModule::Kernel, 110};
+constexpr Result ResultInvalidPriority{ErrorModule::Kernel, 112};
+constexpr Result ResultInvalidCoreId{ErrorModule::Kernel, 113};
+constexpr Result ResultInvalidHandle{ErrorModule::Kernel, 114};
+constexpr Result ResultInvalidPointer{ErrorModule::Kernel, 115};
+constexpr Result ResultInvalidCombination{ErrorModule::Kernel, 116};
+constexpr Result ResultTimedOut{ErrorModule::Kernel, 117};
+constexpr Result ResultCancelled{ErrorModule::Kernel, 118};
+constexpr Result ResultOutOfRange{ErrorModule::Kernel, 119};
+constexpr Result ResultInvalidEnumValue{ErrorModule::Kernel, 120};
+constexpr Result ResultNotFound{ErrorModule::Kernel, 121};
+constexpr Result ResultBusy{ErrorModule::Kernel, 122};
+constexpr Result ResultSessionClosed{ErrorModule::Kernel, 123};
+constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
+constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
+constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
+constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
+constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
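// A minimal usage sketch, assuming Result keeps ResultCode's interface
// (construction from module + description, with .raw exposing the packed u32):
//
//     R_UNLESS(event != nullptr, ResultOutOfResource); // early-return on failure
//     return ResultSuccess;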
} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 365e22e4e..79e15183a 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -96,4 +95,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
constexpr inline s32 LowestThreadPriority = 63;
constexpr inline s32 HighestThreadPriority = 0;
+constexpr inline size_t ThreadLocalRegionSize = 0x200;
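// 0x200 bytes per thread-local region means a single 4 KiB thread-local page
// can back eight regions, which is presumably why the constant is exposed here.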
+
} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index a60adfcab..4bc49087e 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -34,24 +33,24 @@ static inline void FuncReturn32(Core::System& system, u32 result) {
}
////////////////////////////////////////////////////////////////////////////////////////////////////
-// Function wrappers that return type ResultCode
+// Function wrappers that return type Result
-template <ResultCode func(Core::System&, u64)>
+template <Result func(Core::System&, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0)).raw);
}
-template <ResultCode func(Core::System&, u64, u64)>
+template <Result func(Core::System&, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
}
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
-template <ResultCode func(Core::System&, u32, u32)>
+template <Result func(Core::System&, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -59,14 +58,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadActivity
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
Param(system, 2), Param(system, 3))
@@ -74,7 +73,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapProcessMemory and UnmapProcessMemory
-template <ResultCode func(Core::System&, u64, u32, u64, u64)>
+template <Result func(Core::System&, u64, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
Param(system, 2), Param(system, 3))
@@ -82,7 +81,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by ControlCodeMemory
-template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
@@ -90,7 +89,7 @@ void SvcWrap64(Core::System& system) {
.raw);
}
-template <ResultCode func(Core::System&, u32*)>
+template <Result func(Core::System&, u32*)>
void SvcWrap64(Core::System& system) {
u32 param = 0;
const u32 retval = func(system, &param).raw;
@@ -98,7 +97,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -106,7 +105,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u32*)>
+template <Result func(Core::System&, u32*, u32*)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -119,7 +118,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64)>
+template <Result func(Core::System&, u32*, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -127,7 +126,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u32)>
+template <Result func(Core::System&, u32*, u64, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval =
@@ -137,7 +136,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32)>
+template <Result func(Core::System&, u64*, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1))).raw;
@@ -146,12 +145,12 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u32)>
+template <Result func(Core::System&, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1))).raw);
}
-template <ResultCode func(Core::System&, u64*, u64)>
+template <Result func(Core::System&, u64*, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1)).raw;
@@ -160,7 +159,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64*, u32, u32)>
+template <Result func(Core::System&, u64*, u32, u32)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<u32>(Param(system, 1)),
@@ -172,7 +171,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetResourceLimitLimitValue.
-template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)>
+template <Result func(Core::System&, u64*, Handle, LimitableResource)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, static_cast<Handle>(Param(system, 1)),
@@ -183,13 +182,13 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32, u64)>
+template <Result func(Core::System&, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw);
}
// Used by SetResourceLimitLimitValue
-template <ResultCode func(Core::System&, Handle, LimitableResource, u64)>
+template <Result func(Core::System&, Handle, LimitableResource, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
static_cast<LimitableResource>(Param(system, 1)), Param(system, 2))
@@ -197,7 +196,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32, u64)>
+template <Result func(Core::System&, Handle, s32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
static_cast<s32>(Param(system, 1)), Param(system, 2))
@@ -205,44 +204,44 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetThreadCoreMask
-template <ResultCode func(Core::System&, Handle, s32*, u64*)>
+template <Result func(Core::System&, Handle, s32*, u64*)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
u64 param_2 = 0;
- const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
+ const Result retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
system.CurrentArmInterface().SetReg(1, param_1);
system.CurrentArmInterface().SetReg(2, param_2);
FuncReturn(system, retval.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u32)>
+template <Result func(Core::System&, u64, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32, u64)>
+template <Result func(Core::System&, u64, u64, u32, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), Param(system, 3))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u32)>
+template <Result func(Core::System&, u32, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
static_cast<u32>(Param(system, 2)))
.raw);
}
-template <ResultCode func(Core::System&, u64, u64, u64)>
+template <Result func(Core::System&, u64, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1), Param(system, 2)).raw);
}
-template <ResultCode func(Core::System&, u64, u64, u32)>
+template <Result func(Core::System&, u64, u64, u32)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -250,7 +249,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SetMemoryPermission
-template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<Svc::MemoryPermission>(Param(system, 2)))
@@ -258,14 +257,14 @@ void SvcWrap64(Core::System& system) {
}
// Used by MapSharedMemory
-template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1),
Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3)))
.raw);
}
-template <ResultCode func(Core::System&, u32, u64, u64)>
+template <Result func(Core::System&, u32, u64, u64)>
void SvcWrap64(Core::System& system) {
FuncReturn(
system,
@@ -273,7 +272,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitSynchronization
-template <ResultCode func(Core::System&, s32*, u64, s32, s64)>
+template <Result func(Core::System&, s32*, u64, s32, s64)>
void SvcWrap64(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<s32>(Param(system, 2)),
@@ -284,7 +283,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u64, u64, u32, s64)>
+template <Result func(Core::System&, u64, u64, u32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
static_cast<u32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
@@ -292,7 +291,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by GetInfo
-template <ResultCode func(Core::System&, u64*, u64, Handle, u64)>
+template <Result func(Core::System&, u64*, u64, Handle, u64)>
void SvcWrap64(Core::System& system) {
u64 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1),
@@ -303,7 +302,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, u32*, u64, u64, u64, u32, s32)>
+template <Result func(Core::System&, u32*, u64, u64, u64, u32, s32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2), Param(system, 3),
@@ -315,7 +314,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateTransferMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2),
@@ -327,7 +326,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by CreateCodeMemory
-template <ResultCode func(Core::System&, Handle*, u64, u64)>
+template <Result func(Core::System&, Handle*, u64, u64)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
@@ -336,7 +335,7 @@ void SvcWrap64(Core::System& system) {
FuncReturn(system, retval);
}
-template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
+template <Result func(Core::System&, Handle*, u64, u32, u32)>
void SvcWrap64(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
@@ -348,7 +347,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by WaitForAddress
-template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
+template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -357,7 +356,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by SignalToAddress
-template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u64, Svc::SignalType, s32, s32)>
void SvcWrap64(Core::System& system) {
FuncReturn(system,
func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
@@ -426,7 +425,7 @@ void SvcWrap64(Core::System& system) {
}
// Used by QueryMemory32, ArbitrateLock32
-template <ResultCode func(Core::System&, u32, u32, u32)>
+template <Result func(Core::System&, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn32(system,
func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw);
@@ -457,7 +456,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateThread32
-template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
+template <Result func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
@@ -470,7 +469,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetInfo32
-template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
+template <Result func(Core::System&, u32*, u32*, u32, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -485,7 +484,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadPriority32, ConnectToNamedPort32
-template <ResultCode func(Core::System&, u32*, u32)>
+template <Result func(Core::System&, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, &param_1, Param32(system, 1)).raw;
@@ -494,7 +493,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, u32*, u32*, u32)>
+template <Result func(Core::System&, u32*, u32*, u32)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -517,7 +516,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by CreateEvent32
-template <ResultCode func(Core::System&, Handle*, Handle*)>
+template <Result func(Core::System&, Handle*, Handle*)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;
Handle param_2 = 0;
@@ -529,7 +528,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadId32
-template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, u32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
u32 param_2 = 0;
@@ -543,7 +542,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by GetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
+template <Result func(Core::System&, Handle, s32*, u32*, u32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
u32 param_2 = 0;
@@ -563,7 +562,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadActivity32
-template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
+template <Result func(Core::System&, Handle, Svc::ThreadActivity)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<Svc::ThreadActivity>(Param(system, 1)))
@@ -572,7 +571,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadPriority32
-template <ResultCode func(Core::System&, Handle, u32)>
+template <Result func(Core::System&, Handle, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw;
@@ -580,7 +579,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetMemoryAttribute32
-template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
+template <Result func(Core::System&, Handle, u32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -590,7 +589,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by MapSharedMemory32
-template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)),
@@ -600,7 +599,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SetThreadCoreMask32
-template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
+template <Result func(Core::System&, Handle, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
@@ -610,7 +609,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitProcessWideKeyAtomic32
-template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
+template <Result func(Core::System&, u32, u32, Handle, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval =
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
@@ -621,7 +620,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitForAddress32
-template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
+template <Result func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::ArbitrationType>(Param(system, 1)),
@@ -632,7 +631,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by SignalToAddress32
-template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
+template <Result func(Core::System&, u32, Svc::SignalType, s32, s32)>
void SvcWrap32(Core::System& system) {
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
static_cast<Svc::SignalType>(Param(system, 1)),
@@ -642,13 +641,13 @@ void SvcWrap32(Core::System& system) {
}
// Used by SendSyncRequest32, ArbitrateUnlock32
-template <ResultCode func(Core::System&, u32)>
+template <Result func(Core::System&, u32)>
void SvcWrap32(Core::System& system) {
FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
}
// Used by CreateTransferMemory32
-template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
+template <Result func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)>
void SvcWrap32(Core::System& system) {
Handle handle = 0;
const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2),
@@ -659,7 +658,7 @@ void SvcWrap32(Core::System& system) {
}
// Used by WaitSynchronization32
-template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
+template <Result func(Core::System&, u32, u32, s32, u32, s32*)>
void SvcWrap32(Core::System& system) {
s32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
@@ -669,4 +668,26 @@ void SvcWrap32(Core::System& system) {
FuncReturn(system, retval);
}
+// Used by CreateCodeMemory32
+template <Result func(Core::System&, Handle*, u32, u32)>
+void SvcWrap32(Core::System& system) {
+ Handle handle = 0;
+
+ const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2)).raw;
+
+ system.CurrentArmInterface().SetReg(1, handle);
+ FuncReturn(system, retval);
+}
+
+// Used by ControlCodeMemory32
+template <Result func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
+void SvcWrap32(Core::System& system) {
+ const u32 retval =
+ func(system, Param32(system, 0), Param32(system, 1), Param(system, 2), Param(system, 4),
+ static_cast<Svc::MemoryPermission>(Param32(system, 6)))
+ .raw;
+
+ FuncReturn(system, retval);
+}
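// The register layout above follows from the 32-bit SVC ABI, assuming 64-bit
// arguments occupy aligned register pairs: the u64 at logical slot 2 spans
// r2/r3, the next spans r4/r5, leaving the MemoryPermission in r6 (hence
// Param32(system, 6)).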
+
} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index aa985d820..5ee72c432 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "core/core.h"
@@ -12,19 +11,21 @@
namespace Kernel {
TimeManager::TimeManager(Core::System& system_) : system{system_} {
- time_manager_event_type =
- Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
- [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
- KThread* thread = reinterpret_cast<KThread*>(thread_handle);
- {
- KScopedSchedulerLock sl(system.Kernel());
- thread->OnTimer();
- }
- });
+ time_manager_event_type = Core::Timing::CreateEvent(
+ "Kernel::TimeManagerCallback",
+ [this](std::uintptr_t thread_handle, s64 time,
+ std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
+ KThread* thread = reinterpret_cast<KThread*>(thread_handle);
+ {
+ KScopedSchedulerLock sl(system.Kernel());
+ thread->OnTimer();
+ }
+ return std::nullopt;
+ });
}
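// The widened lambda signature matches the new Core::Timing callback contract:
// handlers may return a std::optional<std::chrono::nanoseconds> to request
// rescheduling; returning std::nullopt, as here, keeps the event one-shot.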
void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
if (nanoseconds > 0) {
ASSERT(thread);
ASSERT(thread->GetState() != ThreadState::Runnable);
@@ -35,7 +36,7 @@ void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
}
void TimeManager::UnscheduleTimeEvent(KThread* thread) {
- std::lock_guard lock{mutex};
+ std::scoped_lock lock{mutex};
system.CoreTiming().UnscheduleEvent(time_manager_event_type,
reinterpret_cast<uintptr_t>(thread));
}
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index b1fa26e8c..94d16b3b4 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once