Diffstat (limited to 'src/core/hle/kernel/k_process.cpp')
-rw-r--r--  src/core/hle/kernel/k_process.cpp  365
1 file changed, 222 insertions(+), 143 deletions(-)
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 85c506979..d3e99665f 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <bitset>
@@ -13,7 +12,6 @@
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
-#include "core/device_memory.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_memory_block_manager.h"
@@ -24,7 +22,6 @@
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
-#include "core/hle/kernel/k_slab_heap.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
@@ -59,76 +56,22 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
thread->GetContext64().cpu_registers[0] = 0;
thread->GetContext32().cpu_registers[1] = thread_handle;
thread->GetContext64().cpu_registers[1] = thread_handle;
- thread->DisableDispatch();
- auto& kernel = system.Kernel();
- // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
- {
- KScopedSchedulerLock lock{kernel};
- thread->SetState(ThreadState::Runnable);
+ if (system.DebuggerEnabled()) {
+ thread->RequestSuspend(SuspendType::Debug);
}
+
+ // Run our thread.
+ void(thread->Run());
}
} // Anonymous namespace
-// Represents a page used for thread-local storage.
-//
-// Each TLS page contains slots that may be used by processes and threads.
-// Every process and thread is created with a slot in some arbitrary page
-// (whichever page happens to have an available slot).
-class TLSPage {
-public:
- static constexpr std::size_t num_slot_entries =
- Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE;
-
- explicit TLSPage(VAddr address) : base_address{address} {}
-
- bool HasAvailableSlots() const {
- return !is_slot_used.all();
- }
-
- VAddr GetBaseAddress() const {
- return base_address;
- }
-
- std::optional<VAddr> ReserveSlot() {
- for (std::size_t i = 0; i < is_slot_used.size(); i++) {
- if (is_slot_used[i]) {
- continue;
- }
-
- is_slot_used[i] = true;
- return base_address + (i * Core::Memory::TLS_ENTRY_SIZE);
- }
-
- return std::nullopt;
- }
-
- void ReleaseSlot(VAddr address) {
- // Ensure that all given addresses are consistent with how TLS pages
- // are intended to be used when releasing slots.
- ASSERT(IsWithinPage(address));
- ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
-
- const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
- is_slot_used[index] = false;
- }
-
-private:
- bool IsWithinPage(VAddr address) const {
- return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
- }
-
- VAddr base_address;
- std::bitset<num_slot_entries> is_slot_used;
-};
-
-ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
- ProcessType type) {
+Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name,
+ ProcessType type, KResourceLimit* res_limit) {
auto& kernel = system.Kernel();
process->name = std::move(process_name);
-
- process->resource_limit = kernel.GetSystemResourceLimit();
+ process->resource_limit = res_limit;
process->status = ProcessStatus::Created;
process->program_id = 0;
process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
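Note on the hunk above: main-thread startup no longer flips the thread to Runnable by hand under a scheduler lock; SetupMainThread suspends the thread for the debugger when one is attached and then calls KThread::Run(). The void(...) around the call is the usual idiom for discarding a [[nodiscard]] result. A minimal standalone illustration (FakeResult and Run are hypothetical, not from this codebase):

    #include <cstdio>

    // A result type marked [[nodiscard]], in the spirit of the kernel's Result.
    struct [[nodiscard]] FakeResult {
        bool failed;
    };

    FakeResult Run() {
        return FakeResult{false};
    }

    int main() {
        // Run();      // compilers warn: discarded [[nodiscard]] value
        void(Run());   // functional-style cast to void silences the warning
        std::puts("thread started");
    }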
@@ -143,9 +86,6 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
kernel.AppendNewProcess(process);
- // Open a reference to the resource limit.
- process->resource_limit->Open();
-
// Clear remaining fields.
process->num_running_threads = 0;
process->is_signaled = false;
@@ -153,6 +93,9 @@ ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::st
process->is_suspended = false;
process->schedule_count = 0;
+ // Open a reference to the resource limit.
+ process->resource_limit->Open();
+
return ResultSuccess;
}
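Initialize now takes the resource limit from its caller instead of always grabbing the system-wide limit, and opens its reference only after the remaining fields are cleared, so a successfully initialized process holds exactly one reference. A simplified sketch of the intrusive Open/Close reference counting this relies on (an illustrative class, not the kernel's KAutoObject):

    #include <atomic>

    class RefCounted {
    public:
        // Take an additional reference to a live object.
        void Open() {
            count.fetch_add(1, std::memory_order_relaxed);
        }

        // Drop a reference; the last Close() destroys the object.
        void Close() {
            if (count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                delete this;
            }
        }

    private:
        std::atomic<int> count{1};  // the creator owns the first reference
    };

The matching Close() on the resource limit appears in KProcess::Finalize(), further down in this diff.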
@@ -217,7 +160,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
std::addressof(num_waiters),
reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
next != nullptr) {
- next->SetState(ThreadState::Runnable);
+ next->EndWait(ResultSuccess);
}
KScheduler::SetSchedulerUpdateNeeded(kernel);
@@ -232,7 +175,8 @@ void KProcess::PinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// If the thread isn't terminated, pin it.
if (!cur_thread->IsTerminationRequested()) {
@@ -249,7 +193,8 @@ void KProcess::UnpinCurrentThread(s32 core_id) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Get the current thread.
- KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
+ KThread* cur_thread =
+ kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
// Unpin it.
cur_thread->Unpin();
@@ -273,8 +218,8 @@ void KProcess::UnpinThread(KThread* thread) {
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
-ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
- [[maybe_unused]] size_t size) {
+Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
+ [[maybe_unused]] size_t size) {
// Lock ourselves, to prevent concurrent access.
KScopedLightLock lk(state_lock);
@@ -326,15 +271,19 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
shmem->Close();
}
-void KProcess::RegisterThread(const KThread* thread) {
+void KProcess::RegisterThread(KThread* thread) {
+ KScopedLightLock lk{list_lock};
+
thread_list.push_back(thread);
}
-void KProcess::UnregisterThread(const KThread* thread) {
+void KProcess::UnregisterThread(KThread* thread) {
+ KScopedLightLock lk{list_lock};
+
thread_list.remove(thread);
}
-ResultCode KProcess::Reset() {
+Result KProcess::Reset() {
// Lock the process and the scheduler.
KScopedLightLock lk(state_lock);
KScopedSchedulerLock sl{kernel};
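The RegisterThread/UnregisterThread change above serializes list access on the new list_lock, since SetActivity (added below) walks thread_list while other threads can be created or torn down concurrently. A standalone sketch of the pattern, with std::mutex standing in for KLightLock:

    #include <list>
    #include <mutex>

    struct Thread;

    std::mutex list_lock;           // stands in for KLightLock
    std::list<Thread*> thread_list;

    void RegisterThread(Thread* t) {
        // Without the lock, a concurrent walk of thread_list (as in
        // SetActivity) could observe the list mid-modification.
        std::scoped_lock lk{list_lock};
        thread_list.push_back(t);
    }

    void UnregisterThread(Thread* t) {
        std::scoped_lock lk{list_lock};
        thread_list.remove(t);
    }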
@@ -348,8 +297,51 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
-ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
- std::size_t code_size) {
+Result KProcess::SetActivity(ProcessActivity activity) {
+ // Lock ourselves and the scheduler.
+ KScopedLightLock lk{state_lock};
+ KScopedLightLock list_lk{list_lock};
+ KScopedSchedulerLock sl{kernel};
+
+ // Validate our state.
+ R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
+ R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
+
+ // Either pause or resume.
+ if (activity == ProcessActivity::Paused) {
+ // Verify that we're not suspended.
+ if (is_suspended) {
+ return ResultInvalidState;
+ }
+
+ // Suspend all threads.
+ for (auto* thread : GetThreadList()) {
+ thread->RequestSuspend(SuspendType::Process);
+ }
+
+ // Set ourselves as suspended.
+ SetSuspended(true);
+ } else {
+ ASSERT(activity == ProcessActivity::Runnable);
+
+ // Verify that we're suspended.
+ if (!is_suspended) {
+ return ResultInvalidState;
+ }
+
+ // Resume all threads.
+ for (auto* thread : GetThreadList()) {
+ thread->Resume(SuspendType::Process);
+ }
+
+ // Set ourselves as resumed.
+ SetSuspended(false);
+ }
+
+ return ResultSuccess;
+}
+
+Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
program_id = metadata.GetTitleID();
ideal_core = metadata.GetMainThreadCore();
is_64bit_process = metadata.Is64BitProgram();
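The newly added SetActivity implements the two halves of svcSetProcessActivity: pausing requests a SuspendType::Process suspend on every thread in the list, resuming releases it, and redundant transitions fail with ResultInvalidState. A hedged sketch of a caller (GetProcessFromHandle is a hypothetical lookup helper):

    // Hypothetical SVC-style wrapper; only SetActivity itself is from this diff.
    Result SetProcessActivity(Handle process_handle, ProcessActivity activity) {
        KProcess* process = GetProcessFromHandle(process_handle);
        R_UNLESS(process != nullptr, ResultInvalidHandle);

        // Pausing an already-paused process (or resuming a running one)
        // returns ResultInvalidState, per the is_suspended checks above.
        return process->SetActivity(activity);
    }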
@@ -364,24 +356,24 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
return ResultLimitReached;
}
// Initialize process address space
- if (const ResultCode result{
- page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
- code_size, KMemoryManager::Pool::Application)};
+ if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+ 0x8000000, code_size,
+ KMemoryManager::Pool::Application)};
result.IsError()) {
return result;
}
// Map process code region
- if (const ResultCode result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
- code_size / PageSize, KMemoryState::Code,
- KMemoryPermission::None)};
+ if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+ code_size / PageSize, KMemoryState::Code,
+ KMemoryPermission::None)};
result.IsError()) {
return result;
}
// Initialize process capabilities
const auto& caps{metadata.GetKernelCapabilities()};
- if (const ResultCode result{
+ if (const Result result{
capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
result.IsError()) {
return result;
@@ -401,11 +393,11 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
break;
default:
- UNREACHABLE();
+ ASSERT(false);
}
// Create TLS region
- tls_region_address = CreateTLSRegion();
+ R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
memory_reservation.Commit();
return handle_table.Initialize(capabilities.GetHandleTableSize());
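The TLS region is now created with R_TRY rather than an unchecked assignment, so an out-of-memory Result propagates out of LoadFromMetadata instead of being swallowed. For readers unfamiliar with these macros, they expand roughly as follows (a sketch of the common pattern, not the exact definitions in this tree):

    // Approximate expansions of the result macros used in this diff.
    #define R_UNLESS(condition, result)                                        \
        do {                                                                   \
            if (!(condition)) {                                                \
                return result;                                                 \
            }                                                                  \
        } while (0)

    #define R_TRY(expression)                                                  \
        do {                                                                   \
            if (const auto _r = (expression); _r.IsError()) {                  \
                return _r;                                                     \
            }                                                                  \
        } while (0)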
@@ -428,11 +420,11 @@ void KProcess::PrepareForTermination() {
ChangeStatus(ProcessStatus::Exiting);
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
- for (auto& thread : in_thread_list) {
+ for (auto* thread : in_thread_list) {
if (thread->GetOwnerProcess() != this)
continue;
- if (thread == kernel.CurrentScheduler()->GetCurrentThread())
+ if (thread == GetCurrentThreadPointer(kernel))
continue;
// TODO(Subv): When are the other running/ready threads terminated?
@@ -445,7 +437,7 @@ void KProcess::PrepareForTermination() {
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
- FreeTLSRegion(tls_region_address);
+ this->DeleteThreadLocalRegion(tls_region_address);
tls_region_address = 0;
if (resource_limit) {
@@ -457,9 +449,6 @@ void KProcess::PrepareForTermination() {
}
void KProcess::Finalize() {
- // Finalize the handle table and close any open handles.
- handle_table.Finalize();
-
// Free all shared memory infos.
{
auto it = shared_memory_list.begin();
@@ -484,67 +473,156 @@ void KProcess::Finalize() {
resource_limit = nullptr;
}
+ // Finalize the page table.
+ page_table.reset();
+
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
}
-/**
- * Attempts to find a TLS page that contains a free slot for
- * use by a thread.
- *
- * @returns If a page with an available slot is found, then an iterator
- * pointing to the page is returned. Otherwise the end iterator
- * is returned instead.
- */
-static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
- return std::find_if(tls_pages.begin(), tls_pages.end(),
- [](const auto& page) { return page.HasAvailableSlots(); });
+Result KProcess::CreateThreadLocalRegion(VAddr* out) {
+ KThreadLocalPage* tlp = nullptr;
+ VAddr tlr = 0;
+
+ // See if we can get a region from a partially used TLP.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+ tlr = it->Reserve();
+ ASSERT(tlr != 0);
+
+ if (it->IsAllUsed()) {
+ tlp = std::addressof(*it);
+ partially_used_tlp_tree.erase(it);
+ fully_used_tlp_tree.insert(*tlp);
+ }
+
+ *out = tlr;
+ return ResultSuccess;
+ }
+ }
+
+ // Allocate a new page.
+ tlp = KThreadLocalPage::Allocate(kernel);
+ R_UNLESS(tlp != nullptr, ResultOutOfMemory);
+ auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+
+ // Initialize the new page.
+ R_TRY(tlp->Initialize(kernel, this));
+
+ // Reserve a TLR.
+ tlr = tlp->Reserve();
+ ASSERT(tlr != 0);
+
+ // Insert into our tree.
+ {
+ KScopedSchedulerLock sl{kernel};
+ if (tlp->IsAllUsed()) {
+ fully_used_tlp_tree.insert(*tlp);
+ } else {
+ partially_used_tlp_tree.insert(*tlp);
+ }
+ }
+
+ // We succeeded!
+ tlp_guard.Cancel();
+ *out = tlr;
+ return ResultSuccess;
}
-VAddr KProcess::CreateTLSRegion() {
- KScopedSchedulerLock lock(kernel);
- if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
- tls_page_iter != tls_pages.cend()) {
- return *tls_page_iter->ReserveSlot();
+Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
+ KThreadLocalPage* page_to_free = nullptr;
+
+ // Release the region.
+ {
+ KScopedSchedulerLock sl{kernel};
+
+ // Try to find the page in the partially used list.
+ auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ if (it == partially_used_tlp_tree.end()) {
+ // If we don't find it, it has to be in the fully used list.
+ it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
+ R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress);
+
+ // Release the region.
+ it->Release(addr);
+
+ // Move the page out of the fully used list.
+ KThreadLocalPage* tlp = std::addressof(*it);
+ fully_used_tlp_tree.erase(it);
+ if (tlp->IsAllFree()) {
+ page_to_free = tlp;
+ } else {
+ partially_used_tlp_tree.insert(*tlp);
+ }
+ } else {
+ // Release the region.
+ it->Release(addr);
+
+ // Handle the all-free case.
+ KThreadLocalPage* tlp = std::addressof(*it);
+ if (tlp->IsAllFree()) {
+ partially_used_tlp_tree.erase(it);
+ page_to_free = tlp;
+ }
+ }
+ }
+
+ // If we should free the page it was in, do so.
+ if (page_to_free != nullptr) {
+ page_to_free->Finalize();
+
+ KThreadLocalPage::Free(kernel, page_to_free);
}
- Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()};
- ASSERT(tls_page_ptr);
+ return ResultSuccess;
+}
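CreateThreadLocalRegion and DeleteThreadLocalRegion replace the old linear scan over std::vector<TLSPage> with two intrusive trees: pages that still have a free slot sit in partially_used_tlp_tree, full pages in fully_used_tlp_tree, and pages migrate between the trees as slots are reserved and released (an all-free page is handed back to the allocator). A simplified standalone model of that bookkeeping, using std::map in place of the intrusive trees:

    #include <bitset>
    #include <cstdint>
    #include <map>
    #include <optional>

    constexpr std::size_t SlotsPerPage = 16;  // illustrative; really PageSize / TLS entry size
    using SlotMask = std::bitset<SlotsPerPage>;

    // Pages keyed by base address; each page lives in exactly one map.
    std::map<std::uintptr_t, SlotMask> partially_used, fully_used;

    std::optional<std::uintptr_t> ReserveSlot() {
        if (partially_used.empty()) {
            return std::nullopt;  // the real code allocates a fresh page here
        }
        auto it = partially_used.begin();
        SlotMask& mask = it->second;
        std::size_t slot = 0;
        while (mask[slot]) {
            ++slot;  // a partially used page always has a free slot
        }
        mask[slot] = true;
        const std::uintptr_t address = it->first + slot;
        if (mask.all()) {
            // The page just became full: migrate it to the other tree.
            fully_used.insert(*it);
            partially_used.erase(it);
        }
        return address;
    }

Release runs the same migration in reverse, which is what the tree shuffling in DeleteThreadLocalRegion above implements.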
- const VAddr start{page_table->GetKernelMapRegionStart()};
- const VAddr size{page_table->GetKernelMapRegionEnd() - start};
- const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)};
- const VAddr tls_page_addr{page_table
- ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize,
- KMemoryState::ThreadLocal,
- KMemoryPermission::UserReadWrite,
- tls_map_addr)
- .ValueOr(0)};
+bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.type == DebugWatchpointType::None;
+ })};
- ASSERT(tls_page_addr);
+ if (watch == watchpoints.end()) {
+ return false;
+ }
- std::memset(tls_page_ptr, 0, PageSize);
- tls_pages.emplace_back(tls_page_addr);
+ watch->start_address = addr;
+ watch->end_address = addr + size;
+ watch->type = type;
- const auto reserve_result{tls_pages.back().ReserveSlot()};
- ASSERT(reserve_result.has_value());
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]++;
+ system.Memory().MarkRegionDebug(page, PageSize, true);
+ }
- return *reserve_result;
+ return true;
}
-void KProcess::FreeTLSRegion(VAddr tls_address) {
- KScopedSchedulerLock lock(kernel);
- const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
- auto iter =
- std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
- return page.GetBaseAddress() == aligned_address;
- });
+bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
+ DebugWatchpointType type) {
+ const auto watch{std::find_if(watchpoints.begin(), watchpoints.end(), [&](const auto& wp) {
+ return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
+ })};
- // Something has gone very wrong if we're freeing a region
- // with no actual page available.
- ASSERT(iter != tls_pages.cend());
+ if (watch == watchpoints.end()) {
+ return false;
+ }
+
+ watch->start_address = 0;
+ watch->end_address = 0;
+ watch->type = DebugWatchpointType::None;
+
+ for (VAddr page = Common::AlignDown(addr, PageSize); page < addr + size; page += PageSize) {
+ debug_page_refcounts[page]--;
+ if (!debug_page_refcounts[page]) {
+ system.Memory().MarkRegionDebug(page, PageSize, false);
+ }
+ }
- iter->ReleaseSlot(tls_address);
+ return true;
}
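The new watchpoint routines keep a per-page reference count so overlapping watchpoints do not unmark a page too early: a page stays flagged as debug memory until the last watchpoint covering it is removed. A minimal model of that counting (MarkPage is a hypothetical stand-in for Memory::MarkRegionDebug on a single page):

    #include <cstdint>
    #include <map>

    constexpr std::uintptr_t kPageSize = 0x1000;  // illustrative page size

    std::map<std::uintptr_t, int> debug_page_refcounts;

    void MarkPage(std::uintptr_t page, bool debug);  // hypothetical

    void CoverRange(std::uintptr_t addr, std::uint64_t size) {
        for (auto page = addr & ~(kPageSize - 1); page < addr + size; page += kPageSize) {
            if (debug_page_refcounts[page]++ == 0) {
                MarkPage(page, true);   // first watchpoint touching this page
            }
        }
    }

    void UncoverRange(std::uintptr_t addr, std::uint64_t size) {
        for (auto page = addr & ~(kPageSize - 1); page < addr + size; page += kPageSize) {
            if (--debug_page_refcounts[page] == 0) {
                MarkPage(page, false);  // last watchpoint left this page
            }
        }
    }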
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
@@ -567,9 +645,10 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
- : KAutoObjectWithSlabHeapAndContainer{kernel_},
- page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_},
- address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {}
+ : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
+ kernel_.System())},
+ handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
+ state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;
@@ -583,7 +662,7 @@ void KProcess::ChangeStatus(ProcessStatus new_status) {
NotifyAvailable();
}
-ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) {
+Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
ASSERT(stack_size);
// The kernel always ensures that the given stack size is page aligned.
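A tiny sketch of the power-of-two align-up that guarantee typically comes from, the counterpart of the Common::AlignDown calls used above (illustrative helper, assumed behavior):

    #include <cstddef>

    constexpr std::size_t kPageSize = 0x1000;

    // Round a size up to the next page boundary (align must be a power of two).
    constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    static_assert(AlignUp(0x1234, kPageSize) == 0x2000);
    static_assert(AlignUp(0x1000, kPageSize) == 0x1000);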