path: root/src/core/hle/kernel
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp  21
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp  549
-rw-r--r--  src/core/hle/kernel/hle_ipc.h  416
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp  8
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp  8
-rw-r--r--  src/core/hle/kernel/k_address_space_info.cpp  79
-rw-r--r--  src/core/hle/kernel/k_address_space_info.h  2
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp  1
-rw-r--r--  src/core/hle/kernel/k_client_port.h  1
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp  1
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp  50
-rw-r--r--  src/core/hle/kernel/k_device_address_space.h  4
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp  8
-rw-r--r--  src/core/hle/kernel/k_port.cpp  1
-rw-r--r--  src/core/hle/kernel/k_process.cpp  10
-rw-r--r--  src/core/hle/kernel/k_process.h  4
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp  3
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h  11
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h  1
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp  11
-rw-r--r--  src/core/hle/kernel/k_server_session.h  12
-rw-r--r--  src/core/hle/kernel/k_thread.cpp  291
-rw-r--r--  src/core/hle/kernel/k_thread.h  179
-rw-r--r--  src/core/hle/kernel/kernel.cpp  300
-rw-r--r--  src/core/hle/kernel/kernel.h  136
-rw-r--r--  src/core/hle/kernel/service_thread.cpp  206
-rw-r--r--  src/core/hle/kernel/service_thread.h  29
-rw-r--r--  src/core/hle/kernel/svc/svc_info.cpp  5
-rw-r--r--  src/core/hle/kernel/svc/svc_port.cpp  51
-rw-r--r--  src/core/hle/kernel/svc/svc_synchronization.cpp  45
-rw-r--r--  src/core/hle/kernel/svc_types.h  1
31 files changed, 725 insertions, 1719 deletions
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index c10b7bf30..5b8a248c8 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -14,9 +14,12 @@ namespace Kernel::Board::Nintendo::Nx {
namespace impl {
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024;
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024;
-constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024;
+using namespace Common::Literals;
+
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2280 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeViFatal = 0x200 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x704 * 4_KiB;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4_KiB;
} // namespace impl
@@ -24,6 +27,9 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices +
impl::RequiredNonSecureSystemMemorySizeMisc;
+constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
+ RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
+
namespace {
using namespace Common::Literals;
@@ -120,10 +126,13 @@ size_t KSystemControl::Init::GetAppletPoolSize() {
size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
// Verify that our minimum is at least as large as Nintendo's.
- constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize;
- static_assert(MinimumSize >= 0x29C8000);
+ constexpr size_t MinimumSizeWithFatal = RequiredNonSecureSystemMemorySizeWithFatal;
+ static_assert(MinimumSizeWithFatal >= 0x2C04000);
+
+ constexpr size_t MinimumSizeWithoutFatal = RequiredNonSecureSystemMemorySize;
+ static_assert(MinimumSizeWithoutFatal >= 0x2A00000);
- return MinimumSize;
+ return MinimumSizeWithFatal;
}
namespace {
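Note: the constants above now express each requirement as a page count times a binary size literal from Common::Literals. A minimal sketch of such literals, purely illustrative (the real common/literals.h may differ):

    #include <cstddef>

    constexpr std::size_t operator""_KiB(unsigned long long n) {
        return static_cast<std::size_t>(n) * 1024;
    }

    constexpr std::size_t operator""_MiB(unsigned long long n) {
        return static_cast<std::size_t>(n) * 1024 * 1024;
    }

    // With this, 0x2280 * 4_KiB reads as "0x2280 pages of 4 KiB each".
    static_assert(0x2280 * 4_KiB == 0x2280ULL * 4096);
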
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
deleted file mode 100644
index 494151eef..000000000
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <algorithm>
-#include <array>
-#include <sstream>
-
-#include <boost/range/algorithm_ext/erase.hpp>
-
-#include "common/assert.h"
-#include "common/common_funcs.h"
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "common/scratch_buffer.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/k_auto_object.h"
-#include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_process.h"
-#include "core/hle/kernel/k_server_port.h"
-#include "core/hle/kernel/k_server_session.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/service_thread.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
- ServiceThreadType thread_type)
- : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
- ? kernel.CreateServiceThread(service_name_)
- : kernel.GetDefaultServiceThread()} {}
-
-SessionRequestHandler::~SessionRequestHandler() {
- kernel.ReleaseServiceThread(service_thread);
-}
-
-void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
- auto* server_session = server_port->AcceptSession();
- ASSERT(server_session != nullptr);
-
- RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
-}
-
-void SessionRequestHandler::RegisterSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- manager->SetSessionHandler(shared_from_this());
- service_thread.RegisterServerSession(server_session, manager);
- server_session->Close();
-}
-
-SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}
-
-SessionRequestManager::~SessionRequestManager() = default;
-
-bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& context) const {
- if (IsDomain() && context.HasDomainMessageHeader()) {
- const auto& message_header = context.GetDomainMessageHeader();
- const auto object_id = message_header.object_id;
-
- if (object_id > DomainHandlerCount()) {
- LOG_CRITICAL(IPC, "object_id {} is too big!", object_id);
- return false;
- }
- return !DomainHandler(object_id - 1).expired();
- } else {
- return session_handler != nullptr;
- }
-}
-
-Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
- HLERequestContext& context) {
- Result result = ResultSuccess;
-
- // If the session has been converted to a domain, handle the domain request
- if (this->HasSessionRequestHandler(context)) {
- if (IsDomain() && context.HasDomainMessageHeader()) {
- result = HandleDomainSyncRequest(server_session, context);
- // If there is no domain header, the regular session handler is used
- } else if (this->HasSessionHandler()) {
- // If this manager has an associated HLE handler, forward the request to it.
- result = this->SessionHandler().HandleSyncRequest(*server_session, context);
- }
- } else {
- ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
- IPC::ResponseBuilder rb(context, 2);
- rb.Push(ResultSuccess);
- }
-
- if (convert_to_domain) {
- ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
- this->ConvertToDomain();
- convert_to_domain = false;
- }
-
- return result;
-}
-
-Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
- HLERequestContext& context) {
- if (!context.HasDomainMessageHeader()) {
- return ResultSuccess;
- }
-
- // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
- ASSERT(context.GetManager().get() == this);
-
- // If there is a DomainMessageHeader, then this is CommandType "Request"
- const auto& domain_message_header = context.GetDomainMessageHeader();
- const u32 object_id{domain_message_header.object_id};
- switch (domain_message_header.command) {
- case IPC::DomainMessageHeader::CommandType::SendMessage:
- if (object_id > this->DomainHandlerCount()) {
- LOG_CRITICAL(IPC,
- "object_id {} is too big! This probably means a recent service call "
- "needed to return a new interface!",
- object_id);
- ASSERT(false);
- return ResultSuccess; // Ignore error if asserts are off
- }
- if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
- return strong_ptr->HandleSyncRequest(*server_session, context);
- } else {
- ASSERT(false);
- return ResultSuccess;
- }
-
- case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
- LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
-
- this->CloseDomainHandler(object_id - 1);
-
- IPC::ResponseBuilder rb{context, 2};
- rb.Push(ResultSuccess);
- return ResultSuccess;
- }
- }
-
- LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
- ASSERT(false);
- return ResultSuccess;
-}
-
-HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
- KServerSession* server_session_, KThread* thread_)
- : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} {
- cmd_buf[0] = 0;
-}
-
-HLERequestContext::~HLERequestContext() = default;
-
-void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf,
- bool incoming) {
- IPC::RequestParser rp(src_cmdbuf);
- command_header = rp.PopRaw<IPC::CommandHeader>();
-
- if (command_header->IsCloseCommand()) {
- // Close does not populate the rest of the IPC header
- return;
- }
-
- // If handle descriptor is present, add size of it
- if (command_header->enable_handle_descriptor) {
- handle_descriptor_header = rp.PopRaw<IPC::HandleDescriptorHeader>();
- if (handle_descriptor_header->send_current_pid) {
- pid = rp.Pop<u64>();
- }
- if (incoming) {
- // Populate the object lists with the data in the IPC request.
- incoming_copy_handles.reserve(handle_descriptor_header->num_handles_to_copy);
- incoming_move_handles.reserve(handle_descriptor_header->num_handles_to_move);
-
- for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) {
- incoming_copy_handles.push_back(rp.Pop<Handle>());
- }
- for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) {
- incoming_move_handles.push_back(rp.Pop<Handle>());
- }
- } else {
- // For responses we just ignore the handles, they're empty and will be populated when
- // translating the response.
- rp.Skip(handle_descriptor_header->num_handles_to_copy, false);
- rp.Skip(handle_descriptor_header->num_handles_to_move, false);
- }
- }
-
- buffer_x_desciptors.reserve(command_header->num_buf_x_descriptors);
- buffer_a_desciptors.reserve(command_header->num_buf_a_descriptors);
- buffer_b_desciptors.reserve(command_header->num_buf_b_descriptors);
- buffer_w_desciptors.reserve(command_header->num_buf_w_descriptors);
-
- for (u32 i = 0; i < command_header->num_buf_x_descriptors; ++i) {
- buffer_x_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorX>());
- }
- for (u32 i = 0; i < command_header->num_buf_a_descriptors; ++i) {
- buffer_a_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
- for (u32 i = 0; i < command_header->num_buf_b_descriptors; ++i) {
- buffer_b_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
- for (u32 i = 0; i < command_header->num_buf_w_descriptors; ++i) {
- buffer_w_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorABW>());
- }
-
- const auto buffer_c_offset = rp.GetCurrentOffset() + command_header->data_size;
-
- if (!command_header->IsTipc()) {
- // Padding to align to 16 bytes
- rp.AlignWithPadding();
-
- if (GetManager()->IsDomain() &&
- ((command_header->type == IPC::CommandType::Request ||
- command_header->type == IPC::CommandType::RequestWithContext) ||
- !incoming)) {
- // If this is an incoming message, only CommandType "Request" has a domain header
- // All outgoing domain messages have the domain header, if only incoming has it
- if (incoming || domain_message_header) {
- domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
- } else {
- if (GetManager()->IsDomain()) {
- LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
- }
- }
- }
-
- data_payload_header = rp.PopRaw<IPC::DataPayloadHeader>();
-
- data_payload_offset = rp.GetCurrentOffset();
-
- if (domain_message_header &&
- domain_message_header->command ==
- IPC::DomainMessageHeader::CommandType::CloseVirtualHandle) {
- // CloseVirtualHandle command does not have SFC* or any data
- return;
- }
-
- if (incoming) {
- ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'I'));
- } else {
- ASSERT(data_payload_header->magic == Common::MakeMagic('S', 'F', 'C', 'O'));
- }
- }
-
- rp.SetCurrentOffset(buffer_c_offset);
-
- // For Inline buffers, the response data is written directly to buffer_c_offset
- // and in this case we don't have any BufferDescriptorC on the request.
- if (command_header->buf_c_descriptor_flags >
- IPC::CommandHeader::BufferDescriptorCFlag::InlineDescriptor) {
- if (command_header->buf_c_descriptor_flags ==
- IPC::CommandHeader::BufferDescriptorCFlag::OneDescriptor) {
- buffer_c_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
- } else {
- u32 num_buf_c_descriptors =
- static_cast<u32>(command_header->buf_c_descriptor_flags.Value()) - 2;
-
- // This is used to detect possible underflows, in case something is broken
- // with the two ifs above and the flags value is == 0 || == 1.
- ASSERT(num_buf_c_descriptors < 14);
-
- for (u32 i = 0; i < num_buf_c_descriptors; ++i) {
- buffer_c_desciptors.push_back(rp.PopRaw<IPC::BufferDescriptorC>());
- }
- }
- }
-
- rp.SetCurrentOffset(data_payload_offset);
-
- command = rp.Pop<u32_le>();
- rp.Skip(1, false); // The command is actually an u64, but we don't use the high part.
-}
-
-Result HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table,
- u32_le* src_cmdbuf) {
- ParseCommandBuffer(handle_table, src_cmdbuf, true);
-
- if (command_header->IsCloseCommand()) {
- // Close does not populate the rest of the IPC header
- return ResultSuccess;
- }
-
- std::copy_n(src_cmdbuf, IPC::COMMAND_BUFFER_LENGTH, cmd_buf.begin());
-
- return ResultSuccess;
-}
-
-Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_thread) {
- auto current_offset = handles_offset;
- auto& owner_process = *requesting_thread.GetOwnerProcess();
- auto& handle_table = owner_process.GetHandleTable();
-
- for (auto& object : outgoing_copy_objects) {
- Handle handle{};
- if (object) {
- R_TRY(handle_table.Add(&handle, object));
- }
- cmd_buf[current_offset++] = handle;
- }
- for (auto& object : outgoing_move_objects) {
- Handle handle{};
- if (object) {
- R_TRY(handle_table.Add(&handle, object));
-
- // Close our reference to the object, as it is being moved to the caller.
- object->Close();
- }
- cmd_buf[current_offset++] = handle;
- }
-
- // Write the domain objects to the command buffer, these go after the raw untranslated data.
- // TODO(Subv): This completely ignores C buffers.
-
- if (GetManager()->IsDomain()) {
- current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
- for (auto& object : outgoing_domain_objects) {
- GetManager()->AppendDomainHandler(std::move(object));
- cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
- }
- }
-
- // Copy the translated command buffer back into the thread's command buffer area.
- memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(),
- write_size * sizeof(u32));
-
- return ResultSuccess;
-}
-
-std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
- if (is_buffer_a) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorA().size() > buffer_index, { return {}; },
- "BufferDescriptorA invalid buffer_index {}", buffer_index);
- std::vector<u8> buffer(BufferDescriptorA()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
- return buffer;
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorX().size() > buffer_index, { return {}; },
- "BufferDescriptorX invalid buffer_index {}", buffer_index);
- std::vector<u8> buffer(BufferDescriptorX()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
- return buffer;
- }
-}
-
-std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
- static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_a;
- static thread_local std::array<Common::ScratchBuffer<u8>, 2> read_buffer_x;
-
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
- if (is_buffer_a) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorA().size() > buffer_index, { return {}; },
- "BufferDescriptorA invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_a[buffer_index];
- read_buffer.resize_destructive(BufferDescriptorA()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), read_buffer.data(),
- read_buffer.size());
- return read_buffer;
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorX().size() > buffer_index, { return {}; },
- "BufferDescriptorX invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_x[buffer_index];
- read_buffer.resize_destructive(BufferDescriptorX()[buffer_index].Size());
- memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), read_buffer.data(),
- read_buffer.size());
- return read_buffer;
- }
-}
-
-std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (size == 0) {
- LOG_WARNING(Core, "skip empty buffer write");
- return 0;
- }
-
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
- const std::size_t buffer_size{GetWriteBufferSize(buffer_index)};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- if (is_buffer_b) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size() >= size,
- { return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
- WriteBufferB(buffer, size, buffer_index);
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorC().size() > buffer_index &&
- BufferDescriptorC()[buffer_index].Size() >= size,
- { return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
- WriteBufferC(buffer, size, buffer_index);
- }
-
- return size;
-}
-
-std::size_t HLERequestContext::WriteBufferB(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (buffer_index >= BufferDescriptorB().size() || size == 0) {
- return 0;
- }
-
- const auto buffer_size{BufferDescriptorB()[buffer_index].Size()};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
- return size;
-}
-
-std::size_t HLERequestContext::WriteBufferC(const void* buffer, std::size_t size,
- std::size_t buffer_index) const {
- if (buffer_index >= BufferDescriptorC().size() || size == 0) {
- return 0;
- }
-
- const auto buffer_size{BufferDescriptorC()[buffer_index].Size()};
- if (size > buffer_size) {
- LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size,
- buffer_size);
- size = buffer_size; // TODO(bunnei): This needs to be HW tested
- }
-
- memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
- return size;
-}
-
-std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
- if (is_buffer_a) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorA().size() > buffer_index, { return 0; },
- "BufferDescriptorA invalid buffer_index {}", buffer_index);
- return BufferDescriptorA()[buffer_index].Size();
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorX().size() > buffer_index, { return 0; },
- "BufferDescriptorX invalid buffer_index {}", buffer_index);
- return BufferDescriptorX()[buffer_index].Size();
- }
-}
-
-std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) const {
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
- if (is_buffer_b) {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorB().size() > buffer_index, { return 0; },
- "BufferDescriptorB invalid buffer_index {}", buffer_index);
- return BufferDescriptorB()[buffer_index].Size();
- } else {
- ASSERT_OR_EXECUTE_MSG(
- BufferDescriptorC().size() > buffer_index, { return 0; },
- "BufferDescriptorC invalid buffer_index {}", buffer_index);
- return BufferDescriptorC()[buffer_index].Size();
- }
- return 0;
-}
-
-bool HLERequestContext::CanReadBuffer(std::size_t buffer_index) const {
- const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
- BufferDescriptorA()[buffer_index].Size()};
-
- if (is_buffer_a) {
- return BufferDescriptorA().size() > buffer_index;
- } else {
- return BufferDescriptorX().size() > buffer_index;
- }
-}
-
-bool HLERequestContext::CanWriteBuffer(std::size_t buffer_index) const {
- const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
- BufferDescriptorB()[buffer_index].Size()};
-
- if (is_buffer_b) {
- return BufferDescriptorB().size() > buffer_index;
- } else {
- return BufferDescriptorC().size() > buffer_index;
- }
-}
-
-std::string HLERequestContext::Description() const {
- if (!command_header) {
- return "No command header available";
- }
- std::ostringstream s;
- s << "IPC::CommandHeader: Type:" << static_cast<u32>(command_header->type.Value());
- s << ", X(Pointer):" << command_header->num_buf_x_descriptors;
- if (command_header->num_buf_x_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_x_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorX()[i].Size();
- if (i < command_header->num_buf_x_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", A(Send):" << command_header->num_buf_a_descriptors;
- if (command_header->num_buf_a_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_a_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorA()[i].Size();
- if (i < command_header->num_buf_a_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", B(Receive):" << command_header->num_buf_b_descriptors;
- if (command_header->num_buf_b_descriptors) {
- s << '[';
- for (u64 i = 0; i < command_header->num_buf_b_descriptors; ++i) {
- s << "0x" << std::hex << BufferDescriptorB()[i].Size();
- if (i < command_header->num_buf_b_descriptors - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", C(ReceiveList):" << BufferDescriptorC().size();
- if (!BufferDescriptorC().empty()) {
- s << '[';
- for (u64 i = 0; i < BufferDescriptorC().size(); ++i) {
- s << "0x" << std::hex << BufferDescriptorC()[i].Size();
- if (i < BufferDescriptorC().size() - 1)
- s << ", ";
- }
- s << ']';
- }
- s << ", data_size:" << command_header->data_size.Value();
-
- return s.str();
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
deleted file mode 100644
index 5bf4f171b..000000000
--- a/src/core/hle/kernel/hle_ipc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <functional>
-#include <memory>
-#include <optional>
-#include <span>
-#include <string>
-#include <type_traits>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/concepts.h"
-#include "common/swap.h"
-#include "core/hle/ipc.h"
-#include "core/hle/kernel/svc_common.h"
-
-union Result;
-
-namespace Core::Memory {
-class Memory;
-}
-
-namespace IPC {
-class ResponseBuilder;
-}
-
-namespace Service {
-class ServiceFrameworkBase;
-}
-
-enum class ServiceThreadType {
- Default,
- CreateNew,
-};
-
-namespace Kernel {
-
-class Domain;
-class HLERequestContext;
-class KAutoObject;
-class KernelCore;
-class KEvent;
-class KHandleTable;
-class KServerPort;
-class KProcess;
-class KServerSession;
-class KThread;
-class KReadableEvent;
-class KSession;
-class SessionRequestManager;
-class ServiceThread;
-
-enum class ThreadWakeupReason;
-
-/**
- * Interface implemented by HLE Session handlers.
- * This can be provided to a ServerSession in order to hook into several relevant events
- * (such as a new connection or a SyncRequest) so they can be implemented in the emulator.
- */
-class SessionRequestHandler : public std::enable_shared_from_this<SessionRequestHandler> {
-public:
- SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
- ServiceThreadType thread_type);
- virtual ~SessionRequestHandler();
-
- /**
- * Handles a sync request from the emulated application.
- * @param server_session The ServerSession that was triggered for this sync request,
- * it should be used to differentiate which client (As in ClientSession) we're answering to.
- * TODO(Subv): Use a wrapper structure to hold all the information relevant to
- * this request (ServerSession, Originator thread, Translated command buffer, etc).
- * @returns Result the result code of the translate operation.
- */
- virtual Result HandleSyncRequest(Kernel::KServerSession& session,
- Kernel::HLERequestContext& context) = 0;
-
- void AcceptSession(KServerPort* server_port);
- void RegisterSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager);
-
- ServiceThread& GetServiceThread() const {
- return service_thread;
- }
-
-protected:
- KernelCore& kernel;
- ServiceThread& service_thread;
-};
-
-using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
-using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>;
-
-/**
- * Manages the underlying HLE requests for a session, and whether (or not) the session should be
- * treated as a domain. This is managed separately from server sessions, as this state is shared
- * when objects are cloned.
- */
-class SessionRequestManager final {
-public:
- explicit SessionRequestManager(KernelCore& kernel);
- ~SessionRequestManager();
-
- bool IsDomain() const {
- return is_domain;
- }
-
- void ConvertToDomain() {
- domain_handlers = {session_handler};
- is_domain = true;
- }
-
- void ConvertToDomainOnRequestEnd() {
- convert_to_domain = true;
- }
-
- std::size_t DomainHandlerCount() const {
- return domain_handlers.size();
- }
-
- bool HasSessionHandler() const {
- return session_handler != nullptr;
- }
-
- SessionRequestHandler& SessionHandler() {
- return *session_handler;
- }
-
- const SessionRequestHandler& SessionHandler() const {
- return *session_handler;
- }
-
- void CloseDomainHandler(std::size_t index) {
- if (index < DomainHandlerCount()) {
- domain_handlers[index] = nullptr;
- } else {
- ASSERT_MSG(false, "Unexpected handler index {}", index);
- }
- }
-
- SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const {
- ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index);
- return domain_handlers.at(index);
- }
-
- void AppendDomainHandler(SessionRequestHandlerPtr&& handler) {
- domain_handlers.emplace_back(std::move(handler));
- }
-
- void SetSessionHandler(SessionRequestHandlerPtr&& handler) {
- session_handler = std::move(handler);
- }
-
- ServiceThread& GetServiceThread() const {
- return session_handler->GetServiceThread();
- }
-
- bool HasSessionRequestHandler(const HLERequestContext& context) const;
-
- Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
- Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
-
-private:
- bool convert_to_domain{};
- bool is_domain{};
- SessionRequestHandlerPtr session_handler;
- std::vector<SessionRequestHandlerPtr> domain_handlers;
-
-private:
- KernelCore& kernel;
-};
-
-/**
- * Class containing information about an in-flight IPC request being handled by an HLE service
- * implementation. Services should avoid using old global APIs (e.g. Kernel::GetCommandBuffer()) and
- * when possible use the APIs in this class to service the request.
- *
- * HLE handle protocol
- * ===================
- *
- * To avoid needing HLE services to keep a separate handle table, or having to directly modify the
- * requester's table, a tweaked protocol is used to receive and send handles in requests. The kernel
- * will decode the incoming handles into object pointers and insert a id in the buffer where the
- * handle would normally be. The service then calls GetIncomingHandle() with that id to get the
- * pointer to the object. Similarly, instead of inserting a handle into the command buffer, the
- * service calls AddOutgoingHandle() and stores the returned id where the handle would normally go.
- *
- * The end result is similar to just giving services their own real handle tables, but since these
- * ids are local to a specific context, it avoids requiring services to manage handles for objects
- * across multiple calls and ensuring that unneeded handles are cleaned up.
- */
-class HLERequestContext {
-public:
- explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
- KServerSession* session, KThread* thread);
- ~HLERequestContext();
-
- /// Returns a pointer to the IPC command buffer for this request.
- [[nodiscard]] u32* CommandBuffer() {
- return cmd_buf.data();
- }
-
- /**
- * Returns the session through which this request was made. This can be used as a map key to
- * access per-client data on services.
- */
- [[nodiscard]] Kernel::KServerSession* Session() {
- return server_session;
- }
-
- /// Populates this context with data from the requesting process/thread.
- Result PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf);
-
- /// Writes data from this context back to the requesting process/thread.
- Result WriteToOutgoingCommandBuffer(KThread& requesting_thread);
-
- [[nodiscard]] u32_le GetHipcCommand() const {
- return command;
- }
-
- [[nodiscard]] u32_le GetTipcCommand() const {
- return static_cast<u32_le>(command_header->type.Value()) -
- static_cast<u32_le>(IPC::CommandType::TIPC_CommandRegion);
- }
-
- [[nodiscard]] u32_le GetCommand() const {
- return command_header->IsTipc() ? GetTipcCommand() : GetHipcCommand();
- }
-
- [[nodiscard]] bool IsTipc() const {
- return command_header->IsTipc();
- }
-
- [[nodiscard]] IPC::CommandType GetCommandType() const {
- return command_header->type;
- }
-
- [[nodiscard]] u64 GetPID() const {
- return pid;
- }
-
- [[nodiscard]] u32 GetDataPayloadOffset() const {
- return data_payload_offset;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorX>& BufferDescriptorX() const {
- return buffer_x_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorA() const {
- return buffer_a_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorABW>& BufferDescriptorB() const {
- return buffer_b_desciptors;
- }
-
- [[nodiscard]] const std::vector<IPC::BufferDescriptorC>& BufferDescriptorC() const {
- return buffer_c_desciptors;
- }
-
- [[nodiscard]] const IPC::DomainMessageHeader& GetDomainMessageHeader() const {
- return domain_message_header.value();
- }
-
- [[nodiscard]] bool HasDomainMessageHeader() const {
- return domain_message_header.has_value();
- }
-
- /// Helper function to get a span of a buffer using the appropriate buffer descriptor
- [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
-
- /// Helper function to read a copy of a buffer using the appropriate buffer descriptor
- [[nodiscard]] std::vector<u8> ReadBufferCopy(std::size_t buffer_index = 0) const;
-
- /// Helper function to write a buffer using the appropriate buffer descriptor
- std::size_t WriteBuffer(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /// Helper function to write buffer B
- std::size_t WriteBufferB(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /// Helper function to write buffer C
- std::size_t WriteBufferC(const void* buffer, std::size_t size,
- std::size_t buffer_index = 0) const;
-
- /* Helper function to write a buffer using the appropriate buffer descriptor
- *
- * @tparam T an arbitrary container that satisfies the
- * ContiguousContainer concept in the C++ standard library or a trivially copyable type.
- *
- * @param data The container/data to write into a buffer.
- * @param buffer_index The buffer in particular to write to.
- */
- template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
- std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
- if constexpr (Common::IsContiguousContainer<T>) {
- using ContiguousType = typename T::value_type;
- static_assert(std::is_trivially_copyable_v<ContiguousType>,
- "Container to WriteBuffer must contain trivially copyable objects");
- return WriteBuffer(std::data(data), std::size(data) * sizeof(ContiguousType),
- buffer_index);
- } else {
- static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
- return WriteBuffer(&data, sizeof(T), buffer_index);
- }
- }
-
- /// Helper function to get the size of the input buffer
- [[nodiscard]] std::size_t GetReadBufferSize(std::size_t buffer_index = 0) const;
-
- /// Helper function to get the size of the output buffer
- [[nodiscard]] std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const;
-
- /// Helper function to derive the number of elements able to be contained in the read buffer
- template <typename T>
- [[nodiscard]] std::size_t GetReadBufferNumElements(std::size_t buffer_index = 0) const {
- return GetReadBufferSize(buffer_index) / sizeof(T);
- }
-
- /// Helper function to derive the number of elements able to be contained in the write buffer
- template <typename T>
- [[nodiscard]] std::size_t GetWriteBufferNumElements(std::size_t buffer_index = 0) const {
- return GetWriteBufferSize(buffer_index) / sizeof(T);
- }
-
- /// Helper function to test whether the input buffer at buffer_index can be read
- [[nodiscard]] bool CanReadBuffer(std::size_t buffer_index = 0) const;
-
- /// Helper function to test whether the output buffer at buffer_index can be written
- [[nodiscard]] bool CanWriteBuffer(std::size_t buffer_index = 0) const;
-
- [[nodiscard]] Handle GetCopyHandle(std::size_t index) const {
- return incoming_copy_handles.at(index);
- }
-
- [[nodiscard]] Handle GetMoveHandle(std::size_t index) const {
- return incoming_move_handles.at(index);
- }
-
- void AddMoveObject(KAutoObject* object) {
- outgoing_move_objects.emplace_back(object);
- }
-
- void AddCopyObject(KAutoObject* object) {
- outgoing_copy_objects.emplace_back(object);
- }
-
- void AddDomainObject(SessionRequestHandlerPtr object) {
- outgoing_domain_objects.emplace_back(std::move(object));
- }
-
- template <typename T>
- std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
- return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock());
- }
-
- void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
- manager = manager_;
- }
-
- [[nodiscard]] std::string Description() const;
-
- [[nodiscard]] KThread& GetThread() {
- return *thread;
- }
-
- [[nodiscard]] std::shared_ptr<SessionRequestManager> GetManager() const {
- return manager.lock();
- }
-
-private:
- friend class IPC::ResponseBuilder;
-
- void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
-
- std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
- Kernel::KServerSession* server_session{};
- KThread* thread;
-
- std::vector<Handle> incoming_move_handles;
- std::vector<Handle> incoming_copy_handles;
-
- std::vector<KAutoObject*> outgoing_move_objects;
- std::vector<KAutoObject*> outgoing_copy_objects;
- std::vector<SessionRequestHandlerPtr> outgoing_domain_objects;
-
- std::optional<IPC::CommandHeader> command_header;
- std::optional<IPC::HandleDescriptorHeader> handle_descriptor_header;
- std::optional<IPC::DataPayloadHeader> data_payload_header;
- std::optional<IPC::DomainMessageHeader> domain_message_header;
- std::vector<IPC::BufferDescriptorX> buffer_x_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_a_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_b_desciptors;
- std::vector<IPC::BufferDescriptorABW> buffer_w_desciptors;
- std::vector<IPC::BufferDescriptorC> buffer_c_desciptors;
-
- u32_le command{};
- u64 pid{};
- u32 write_size{};
- u32 data_payload_offset{};
- u32 handles_offset{};
- u32 domain_offset{};
-
- std::weak_ptr<SessionRequestManager> manager{};
-
- KernelCore& kernel;
- Core::Memory::Memory& memory;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index abdb5639f..5e4090e2b 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -33,6 +33,9 @@
namespace Kernel::Init {
+// For macro convenience.
+using KThreadLockInfo = KThread::LockWithPriorityInheritanceInfo;
+
#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS
#define FOREACH_SLAB_TYPE(HANDLER, ...) \
@@ -54,7 +57,8 @@ namespace Kernel::Init {
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
- HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
+ HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \
+ HANDLER(KThreadLockInfo, (SLAB_COUNT(KThread)), ##__VA_ARGS__)
namespace {
@@ -131,7 +135,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
}
size_t CalculateSlabHeapGapSize() {
- constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
+ constexpr size_t KernelSlabHeapGapSize = 2_MiB - 356_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
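Note: the new KThreadLockInfo alias exists only so the HANDLER token-pasting works, since the macro cannot paste a name containing "::". A self-contained sketch of the X-macro pattern FOREACH_SLAB_TYPE relies on, with illustrative names and counts rather than real kernel slabs:

    #include <cstdio>

    #define FOREACH_WIDGET(HANDLER, ...)                                           \
        HANDLER(Foo, 16, ##__VA_ARGS__)                                            \
        HANDLER(Bar, 32, ##__VA_ARGS__)

    // Expansion 1: declare a count per entry.
    #define DECLARE_COUNT(NAME, COUNT, ...) constexpr int Count##NAME = COUNT;
    FOREACH_WIDGET(DECLARE_COUNT)

    // Expansion 2: print each entry, forwarding an extra argument through the list.
    #define PRINT_ENTRY(NAME, COUNT, PREFIX) std::printf("%s" #NAME ": %d\n", PREFIX, COUNT);

    int main() {
        FOREACH_WIDGET(PRINT_ENTRY, "slab ")
        std::printf("total: %d\n", CountFoo + CountBar);
        return 0;
    }
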
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index a442a3b98..fb86451ea 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -29,7 +29,9 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // NOTE: If scheduler lock is not held here, interrupt disable is required.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
@@ -59,7 +61,9 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // NOTE: If scheduler lock is not held here, interrupt disable is required.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
// Load the value from the address.
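Note: DecrementIfLessThan is the primitive these hunks annotate. A minimal sketch with simplified semantics (returning the observed value instead of an access-success flag), using std::atomic compare-exchange in place of the emulated exclusive monitor:

    #include <atomic>
    #include <cstdint>

    // Returns the observed value; the word is decremented only if it was below `value`.
    inline std::int32_t DecrementIfLessThan(std::atomic<std::int32_t>& word, std::int32_t value) {
        std::int32_t current = word.load(std::memory_order_relaxed);
        while (current < value &&
               !word.compare_exchange_weak(current, current - 1, std::memory_order_seq_cst,
                                           std::memory_order_relaxed)) {
            // compare_exchange_weak reloads `current` on failure; retry while still below.
        }
        return current;
    }
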
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index 3e612a207..c36eb5dc4 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -23,86 +23,33 @@ constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{
{ .bit_width = 32, .address = Size_Invalid, .size = 1_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = 128_MiB , .size = 2_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::MapSmall, },
{ .bit_width = 36, .address = 2_GiB , .size = 64_GiB - 2_GiB , .type = KAddressSpaceInfo::Type::MapLarge, },
- { .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, },
+ { .bit_width = 36, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 36, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = 128_MiB , .size = 512_GiB - 128_MiB, .type = KAddressSpaceInfo::Type::Map39Bit, },
{ .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::MapSmall },
- { .bit_width = 39, .address = Size_Invalid, .size = 6_GiB , .type = KAddressSpaceInfo::Type::Heap, },
+ { .bit_width = 39, .address = Size_Invalid, .size = 8_GiB , .type = KAddressSpaceInfo::Type::Heap, },
{ .bit_width = 39, .address = Size_Invalid, .size = 64_GiB , .type = KAddressSpaceInfo::Type::Alias, },
{ .bit_width = 39, .address = Size_Invalid, .size = 2_GiB , .type = KAddressSpaceInfo::Type::Stack, },
}};
// clang-format on
-constexpr bool IsAllowedIndexForAddress(std::size_t index) {
- return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Size_Invalid;
-}
-
-using IndexArray =
- std::array<std::size_t, static_cast<std::size_t>(KAddressSpaceInfo::Type::Count)>;
-
-constexpr IndexArray AddressSpaceIndices32Bit{
- 0, 1, 0, 2, 0, 3,
-};
-
-constexpr IndexArray AddressSpaceIndices36Bit{
- 4, 5, 4, 6, 4, 7,
-};
-
-constexpr IndexArray AddressSpaceIndices39Bit{
- 9, 8, 8, 10, 12, 11,
-};
-
-constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
- type != KAddressSpaceInfo::Type::Stack;
-}
-
-constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::Map39Bit &&
- type != KAddressSpaceInfo::Type::Stack;
-}
-
-constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) {
- return type < KAddressSpaceInfo::Type::Count && type != KAddressSpaceInfo::Type::MapLarge;
+const KAddressSpaceInfo& GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) {
+ for (auto& info : AddressSpaceInfos) {
+ if (info.bit_width == width && info.type == type) {
+ return info;
+ }
+ }
+ UNREACHABLE_MSG("Could not find AddressSpaceInfo");
}
} // namespace
-u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
- const std::size_t index{static_cast<std::size_t>(type)};
- switch (width) {
- case 32:
- ASSERT(IsAllowed32BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
- case 36:
- ASSERT(IsAllowed36BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
- case 39:
- ASSERT(IsAllowed39BitType(type));
- ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
- return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
- }
- ASSERT(false);
- return 0;
+std::size_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
+ return GetAddressSpaceInfo(width, type).address;
}
-std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
- const std::size_t index{static_cast<std::size_t>(type)};
- switch (width) {
- case 32:
- ASSERT(IsAllowed32BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
- case 36:
- ASSERT(IsAllowed36BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
- case 39:
- ASSERT(IsAllowed39BitType(type));
- return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
- }
- ASSERT(false);
- return 0;
+std::size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
+ return GetAddressSpaceInfo(width, type).size;
}
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_address_space_info.h b/src/core/hle/kernel/k_address_space_info.h
index 69e9d77f2..9a26f6b90 100644
--- a/src/core/hle/kernel/k_address_space_info.h
+++ b/src/core/hle/kernel/k_address_space_info.h
@@ -18,7 +18,7 @@ struct KAddressSpaceInfo final {
Count,
};
- static u64 GetAddressSpaceStart(std::size_t width, Type type);
+ static std::size_t GetAddressSpaceStart(std::size_t width, Type type);
static std::size_t GetAddressSpaceSize(std::size_t width, Type type);
const std::size_t bit_width{};
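Note: the per-width index arrays and IsAllowed*Type checks are replaced by a single linear scan keyed on (bit_width, type). A minimal, self-contained sketch of that lookup, with illustrative sizes in GiB rather than the real table:

    #include <array>
    #include <cstddef>
    #include <stdexcept>

    enum class Type { Heap, Alias };

    struct Info {
        std::size_t bit_width;
        std::size_t size_gib;
        Type type;
    };

    constexpr std::array<Info, 3> infos{{
        {36, 8, Type::Heap},
        {39, 8, Type::Heap},
        {39, 64, Type::Alias},
    }};

    constexpr const Info& Get(std::size_t width, Type type) {
        for (const auto& info : infos) {
            if (info.bit_width == width && info.type == type) {
                return info;
            }
        }
        // Stands in for UNREACHABLE_MSG in the real code.
        throw std::logic_error("Could not find AddressSpaceInfo");
    }

    static_assert(Get(39, Type::Alias).size_gib == 64);
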
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index c72a91a76..700ae71e3 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h
index 81046fb86..a757cf9cd 100644
--- a/src/core/hle/kernel/k_client_port.h
+++ b/src/core/hle/kernel/k_client_port.h
@@ -15,7 +15,6 @@ namespace Kernel {
class KClientSession;
class KernelCore;
class KPort;
-class SessionRequestManager;
class KClientPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index b4197a8d5..da0c9ac8c 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_session.h"
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 3f0be1c3f..f40cf92b1 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -111,36 +111,36 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
KScopedSchedulerLock sl(kernel);
// Remove waiter thread.
- s32 num_waiters{};
- KThread* next_owner_thread =
- owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+ bool has_waiters{};
+ KThread* const next_owner_thread =
+ owner_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr);
// Determine the next tag.
u32 next_value{};
if (next_owner_thread != nullptr) {
next_value = next_owner_thread->GetAddressKeyValue();
- if (num_waiters > 1) {
+ if (has_waiters) {
next_value |= Svc::HandleWaitMask;
}
+ }
- // Write the value to userspace.
- Result result{ResultSuccess};
- if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
- result = ResultSuccess;
- } else {
- result = ResultInvalidCurrentMemory;
- }
+ // Synchronize memory before proceeding.
+ std::atomic_thread_fence(std::memory_order_seq_cst);
- // Signal the next owner thread.
- next_owner_thread->EndWait(result);
- return result;
+ // Write the value to userspace.
+ Result result{ResultSuccess};
+ if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
+ result = ResultSuccess;
} else {
- // Just write the value to userspace.
- R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
- ResultInvalidCurrentMemory);
+ result = ResultInvalidCurrentMemory;
+ }
- return ResultSuccess;
+ // If necessary, signal the next owner thread.
+ if (next_owner_thread != nullptr) {
+ next_owner_thread->EndWait(result);
}
+
+ R_RETURN(result);
}
}
@@ -198,7 +198,9 @@ void KConditionVariable::SignalImpl(KThread* thread) {
u32 prev_tag{};
bool can_access{};
{
- // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+ // NOTE: If scheduler lock is not held here, interrupt disable is required.
+ // KScopedInterruptDisable di;
+
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) [[likely]] {
@@ -245,9 +247,11 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);
- this->SignalImpl(target_thread);
it = thread_tree.erase(it);
target_thread->ClearConditionVariable();
+
+ this->SignalImpl(target_thread);
+
++num_waiters;
}
@@ -277,16 +281,16 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Update the value and process for the next owner.
{
// Remove waiter thread.
- s32 num_waiters{};
+ bool has_waiters{};
KThread* next_owner_thread =
- cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+ cur_thread->RemoveUserWaiterByKey(std::addressof(has_waiters), addr);
// Update for the next owner thread.
u32 next_value{};
if (next_owner_thread != nullptr) {
// Get the next tag value.
next_value = next_owner_thread->GetAddressKeyValue();
- if (num_waiters > 1) {
+ if (has_waiters) {
next_value |= Svc::HandleWaitMask;
}
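Note: in both hunks the userspace mutex word ends up holding the next owner's tag, with a "more waiters" flag ORed in when other threads remain queued. A minimal sketch of that convention; the mask value is illustrative of Svc::HandleWaitMask, not authoritative:

    #include <cstdint>

    constexpr std::uint32_t WaitMask = 0x40000000;

    constexpr std::uint32_t PackTag(std::uint32_t owner_tag, bool has_waiters) {
        return owner_tag | (has_waiters ? WaitMask : 0);
    }

    constexpr std::uint32_t OwnerTag(std::uint32_t tag) {
        return tag & ~WaitMask;
    }

    static_assert(OwnerTag(PackTag(0x1234, true)) == 0x1234);
    static_assert((PackTag(0x1234, false) & WaitMask) == 0);
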
diff --git a/src/core/hle/kernel/k_device_address_space.h b/src/core/hle/kernel/k_device_address_space.h
index 4709df995..b4a014c38 100644
--- a/src/core/hle/kernel/k_device_address_space.h
+++ b/src/core/hle/kernel/k_device_address_space.h
@@ -21,9 +21,9 @@ public:
~KDeviceAddressSpace();
Result Initialize(u64 address, u64 size);
- void Finalize();
+ void Finalize() override;
- bool IsInitialized() const {
+ bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy(uintptr_t arg) {}
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index d791acbe3..14cb615da 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -90,15 +90,15 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
KScopedSchedulerLock sl(kernel);
// Get the next owner.
- s32 num_waiters;
- KThread* next_owner = owner_thread->RemoveWaiterByKey(
- std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+ bool has_waiters;
+ KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
// Pass the lock to the next owner.
uintptr_t next_tag = 0;
if (next_owner != nullptr) {
next_tag =
- reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
+ reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(has_waiters);
next_owner->EndWait(ResultSuccess);
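Note: here the tag word packs the owner KThread pointer with bit 0 as the waiters flag, which the hunk now sets from a bool instead of a waiter count. A minimal sketch of that pointer tagging, assuming a suitably aligned thread object:

    #include <cstdint>

    struct Thread {
        alignas(2) int placeholder; // alignment keeps bit 0 of the address free
    };

    inline std::uintptr_t MakeTag(Thread* owner, bool has_waiters) {
        return reinterpret_cast<std::uintptr_t>(owner) | static_cast<std::uintptr_t>(has_waiters);
    }

    inline Thread* OwnerOf(std::uintptr_t tag) {
        return reinterpret_cast<Thread*>(tag & ~std::uintptr_t{1});
    }

    inline bool HasWaiters(std::uintptr_t tag) {
        return (tag & 1) != 0;
    }
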
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index 77d00ae2c..0a45ffd57 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_port.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/svc_results.h"
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 0e4283a0c..d44f6e921 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -119,7 +119,6 @@ void KProcess::DecrementRunningThreadCount() {
if (const auto prev = num_running_threads--; prev == 1) {
// TODO(bunnei): Process termination to be implemented when multiprocess is supported.
- UNIMPLEMENTED_MSG("KProcess termination is not implemennted!");
}
}
@@ -157,9 +156,9 @@ bool KProcess::ReleaseUserException(KThread* thread) {
exception_thread = nullptr;
// Remove waiter thread.
- s32 num_waiters{};
- if (KThread* next = thread->RemoveWaiterByKey(
- std::addressof(num_waiters),
+ bool has_waiters{};
+ if (KThread* next = thread->RemoveKernelWaiterByKey(
+ std::addressof(has_waiters),
reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
next != nullptr) {
next->EndWait(ResultSuccess);
@@ -357,9 +356,6 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
system_resource_size = metadata.GetSystemResourceSize();
image_size = code_size;
- // We currently do not support process-specific system resource
- UNIMPLEMENTED_IF(system_resource_size != 0);
-
KScopedResourceReservation memory_reservation(
resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size);
if (!memory_reservation.Succeeded()) {
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 09bf2f1d0..549809000 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -310,10 +310,10 @@ public:
/// Clears the signaled state of the process if and only if it's signaled.
///
/// @pre The process must not be already terminated. If this is called on a
- /// terminated process, then ERR_INVALID_STATE will be returned.
+ /// terminated process, then ResultInvalidState will be returned.
///
/// @pre The process must be in a signaled state. If this is called on a
- /// process instance that is not signaled, ERR_INVALID_STATE will be
+ /// process instance that is not signaled, ResultInvalidState will be
/// returned.
Result Reset();
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index b9d22b414..626517619 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
+#include "common/overflow.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_resource_limit.h"
@@ -104,7 +105,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) {
ASSERT(current_hints[index] <= current_values[index]);
// If we would overflow, don't allow to succeed.
- if (current_values[index] + value <= current_values[index]) {
+ if (Common::WrappingAdd(current_values[index], value) <= current_values[index]) {
break;
}
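Note: the plain addition in the old overflow check could itself overflow (undefined behavior for signed s64), which is what Common::WrappingAdd avoids. A minimal sketch in that spirit (the real common/overflow.h may differ): adding in unsigned space wraps modulo 2^64, so `WrappingAdd(cur, value) <= cur` safely detects overflow for a positive `value`:

    #include <cstdint>

    constexpr std::int64_t WrappingAdd(std::int64_t lhs, std::int64_t rhs) {
        return static_cast<std::int64_t>(static_cast<std::uint64_t>(lhs) +
                                         static_cast<std::uint64_t>(rhs));
    }

    static_assert(WrappingAdd(2, 3) == 5);
    static_assert(WrappingAdd(INT64_MAX, 1) == INT64_MIN);
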
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 129d60472..13463717f 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -31,22 +31,23 @@ public:
}
if (IsLockedByCurrentThread()) {
- // If we already own the lock, we can just increment the count.
+ // If we already own the lock, the lock count should be > 0.
+ // For debug, ensure this is true.
ASSERT(lock_count > 0);
- lock_count++;
} else {
// Otherwise, we want to disable scheduling and acquire the spinlock.
SchedulerType::DisableScheduling(kernel);
spin_lock.Lock();
- // For debug, ensure that our state is valid.
ASSERT(lock_count == 0);
ASSERT(owner_thread == nullptr);
- // Increment count, take ownership.
- lock_count = 1;
+ // Take ownership of the lock.
owner_thread = GetCurrentThreadPointer(kernel);
}
+
+ // Increment the lock count.
+ lock_count++;
}
void Unlock() {
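Note: the hunk turns Lock() into the usual recursive-ownership shape: the underlying spinlock is acquired only on first entry, and the count is incremented on a single path. A minimal user-space sketch of that pattern, with std::mutex and std::thread::id standing in for the kernel spinlock and the owning KThread pointer:

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    class RecursiveSchedulerLockSketch {
    public:
        void Lock() {
            if (owner.load(std::memory_order_relaxed) == std::this_thread::get_id()) {
                assert(count > 0); // already owned: only the count should change
            } else {
                mutex.lock();
                assert(count == 0);
                owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
            }
            ++count; // single increment path, matching the new Lock() above
        }

        void Unlock() {
            if (--count == 0) {
                owner.store(std::thread::id{}, std::memory_order_relaxed);
                mutex.unlock();
            }
        }

    private:
        std::mutex mutex;
        std::atomic<std::thread::id> owner{};
        int count{};
    };
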
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
index 59b3e32ae..a15640fd2 100644
--- a/src/core/hle/kernel/k_scoped_lock.h
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -4,6 +4,7 @@
#pragma once
#include <concepts>
+#include <memory>
#include <type_traits>
namespace Kernel {
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index aa1941f01..01591af5b 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -10,8 +10,6 @@
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/core_timing.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_port.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
@@ -22,6 +20,8 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/service/hle_ipc.h"
+#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
namespace Kernel {
@@ -281,8 +281,8 @@ Result KServerSession::SendReply(bool is_hle) {
return result;
}
-Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context,
- std::weak_ptr<SessionRequestManager> manager) {
+Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context,
+ std::weak_ptr<Service::SessionRequestManager> manager) {
// Lock the session.
KScopedLightLock lk{m_lock};
@@ -329,7 +329,8 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<HLERequestContext>* out_co
if (out_context != nullptr) {
// HLE request.
u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
- *out_context = std::make_shared<HLERequestContext>(kernel, memory, this, client_thread);
+ *out_context =
+ std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread);
(*out_context)->SetSessionRequestManager(manager);
(*out_context)
->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index 6e189af8b..33f380352 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -10,18 +10,20 @@
#include <boost/intrusive/list.hpp>
-#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/result.h"
+namespace Service {
+class HLERequestContext;
+class SessionRequestManager;
+} // namespace Service
+
namespace Kernel {
-class HLERequestContext;
class KernelCore;
class KSession;
-class SessionRequestManager;
class KThread;
class KServerSession final : public KSynchronizationObject,
@@ -52,8 +54,8 @@ public:
/// TODO: flesh these out to match the real kernel
Result OnRequest(KSessionRequest* request);
Result SendReply(bool is_hle = false);
- Result ReceiveRequest(std::shared_ptr<HLERequestContext>* out_context = nullptr,
- std::weak_ptr<SessionRequestManager> manager = {});
+ Result ReceiveRequest(std::shared_ptr<Service::HLERequestContext>* out_context = nullptr,
+ std::weak_ptr<Service::SessionRequestManager> manager = {});
Result SendReplyHLE() {
return SendReply(true);
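
Swapping the hle_ipc.h include for forward declarations keeps kernel headers from pulling in the service layer; only the names are needed here because the header traffics in pointers and smart pointers. A minimal illustration of the pattern with a hypothetical class:

    #include <memory>

    namespace Service {
    class HLERequestContext; // Forward declaration: no definition needed here.
    }

    namespace Kernel {

    class ExampleUser {
    public:
        // Declaring parameters and members as (smart) pointers to an incomplete
        // type is fine; only the .cpp that dereferences them needs the full header.
        void Accept(std::shared_ptr<Service::HLERequestContext> context);

    private:
        std::shared_ptr<Service::HLERequestContext> m_context;
    };

    } // namespace Kernel
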
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 2d3da9d66..8c403f5fd 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -29,6 +29,7 @@
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/svc.h"
#include "core/hle/kernel/svc_results.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -190,7 +191,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
light_ipc_data = nullptr;
// We're not waiting for a lock, and we haven't disabled migration.
- lock_owner = nullptr;
+ waiting_lock_info = nullptr;
num_core_migration_disables = 0;
// We have no waiters, but we do have an entrypoint.
@@ -298,6 +299,25 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
}
+Result KThread::InitializeServiceThread(Core::System& system, KThread* thread,
+ std::function<void()>&& func, s32 prio, s32 virt_core,
+ KProcess* owner) {
+ system.Kernel().GlobalSchedulerContext().AddThread(thread);
+ std::function<void()> func2{[&system, func{std::move(func)}] {
+ // Similar to UserModeThreadStarter.
+ system.Kernel().CurrentScheduler()->OnThreadStart();
+
+ // Run the guest function.
+ func();
+
+ // Exit.
+ Svc::ExitThread(system);
+ }};
+
+ R_RETURN(InitializeThread(thread, {}, {}, {}, prio, virt_core, owner, ThreadType::HighPriority,
+ std::move(func2)));
+}
+
void KThread::PostDestroy(uintptr_t arg) {
KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL);
const bool resource_limit_release_hint = (arg & 1);
@@ -321,25 +341,39 @@ void KThread::Finalize() {
// Release any waiters.
{
- ASSERT(lock_owner == nullptr);
+ ASSERT(waiting_lock_info == nullptr);
KScopedSchedulerLock sl{kernel};
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- // Get the thread.
- KThread* const waiter = std::addressof(*it);
+ // Check that we have no kernel waiters.
+ ASSERT(num_kernel_waiters == 0);
+
+ auto it = held_lock_info_list.begin();
+ while (it != held_lock_info_list.end()) {
+ // Get the lock info.
+ auto* const lock_info = std::addressof(*it);
+
+ // The lock shouldn't have a kernel waiter.
+ ASSERT(!lock_info->GetIsKernelAddressKey());
- // The thread shouldn't be a kernel waiter.
- ASSERT(!waiter->GetAddressKeyIsKernel());
+ // Remove all waiters.
+ while (lock_info->GetWaiterCount() != 0) {
+ // Get the front waiter.
+ KThread* const waiter = lock_info->GetHighestPriorityWaiter();
+
+ // Remove it from the lock.
+ if (lock_info->RemoveWaiter(waiter)) {
+ ASSERT(lock_info->GetWaiterCount() == 0);
+ }
- // Clear the lock owner.
- waiter->SetLockOwner(nullptr);
+ // Cancel the thread's wait.
+ waiter->CancelWait(ResultInvalidState, true);
+ }
- // Erase the waiter from our list.
- it = waiter_list.erase(it);
+ // Remove the held lock from our list.
+ it = held_lock_info_list.erase(it);
- // Cancel the thread's wait.
- waiter->CancelWait(ResultInvalidState, true);
+ // Free the lock info.
+ LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
}
}
@@ -688,6 +722,24 @@ void KThread::SetBasePriority(s32 value) {
RestorePriority(kernel, this);
}
+KThread* KThread::GetLockOwner() const {
+ return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr;
+}
+
+void KThread::IncreaseBasePriority(s32 priority_) {
+ ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(!this->GetStackParameters().is_pinned);
+
+ // Set our base priority.
+ if (base_priority > priority_) {
+ base_priority = priority_;
+
+ // Perform a priority restoration.
+ RestorePriority(kernel, this);
+ }
+}
+
void KThread::RequestSuspend(SuspendType type) {
KScopedSchedulerLock sl{kernel};
@@ -871,51 +923,89 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
R_SUCCEED();
}
-void KThread::AddWaiterImpl(KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+ // Set ourselves as the lock's owner.
+ lock_info->SetOwner(this);
+
+ // Add the lock to our held list.
+ held_lock_info_list.push_front(*lock_info);
+}
+
+KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_,
+ bool is_kernel_address_key_) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
- // Find the right spot to insert the waiter.
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- if (it->GetPriority() > thread->GetPriority()) {
- break;
+ // Try to find an existing held lock.
+ for (auto& held_lock : held_lock_info_list) {
+ if (held_lock.GetAddressKey() == address_key_ &&
+ held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) {
+ return std::addressof(held_lock);
}
- it++;
}
+ return nullptr;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+ ASSERT(thread->GetConditionVariableTree() == nullptr);
+
+ // Get the thread's address key.
+ const auto address_key_ = thread->GetAddressKey();
+ const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey();
+
// Keep track of how many kernel waiters we have.
- if (thread->GetAddressKeyIsKernel()) {
+ if (is_kernel_address_key_) {
ASSERT((num_kernel_waiters++) >= 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
- // Insert the waiter.
- waiter_list.insert(it, *thread);
- thread->SetLockOwner(this);
+ // Get the relevant lock info.
+ auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_);
+ if (lock_info == nullptr) {
+ // Create a new lock for the address key.
+ lock_info =
+ LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_);
+
+ // Add the new lock to our list.
+ this->AddHeldLock(lock_info);
+ }
+
+ // Add the thread as waiter to the lock info.
+ lock_info->AddWaiter(thread);
}
void KThread::RemoveWaiterImpl(KThread* thread) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
// Keep track of how many kernel waiters we have.
- if (thread->GetAddressKeyIsKernel()) {
+ if (thread->GetIsKernelAddressKey()) {
ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel);
}
+ // Get the info for the lock the thread is waiting on.
+ auto* lock_info = thread->GetWaitingLockInfo();
+ ASSERT(lock_info->GetOwner() == this);
+
// Remove the waiter.
- waiter_list.erase(waiter_list.iterator_to(*thread));
- thread->SetLockOwner(nullptr);
+ if (lock_info->RemoveWaiter(thread)) {
+ held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+ LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+ }
}
-void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
- ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked());
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
- while (true) {
+ while (thread != nullptr) {
// We want to inherit priority where possible.
s32 new_priority = thread->GetBasePriority();
- if (thread->HasWaiters()) {
- new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+ for (const auto& held_lock : thread->held_lock_info_list) {
+ new_priority =
+ std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
}
// If the priority we would inherit is not different from ours, don't do anything.
@@ -923,9 +1013,18 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
return;
}
+ // Get the owner of whatever lock this thread is waiting on.
+ KThread* const lock_owner = thread->GetLockOwner();
+
+ // If the thread is waiting on some lock, remove it as a waiter to prevent violating red
+ // black tree invariants.
+ if (lock_owner != nullptr) {
+ lock_owner->RemoveWaiterImpl(thread);
+ }
+
// Ensure we don't violate condition variable red black tree invariants.
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
- BeforeUpdatePriority(kernel_ctx, cv_tree, thread);
+ BeforeUpdatePriority(kernel, cv_tree, thread);
}
// Change the priority.
@@ -934,73 +1033,99 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
// Restore the condition variable, if relevant.
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
- AfterUpdatePriority(kernel_ctx, cv_tree, thread);
+ AfterUpdatePriority(kernel, cv_tree, thread);
}
- // Update the scheduler.
- KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority);
-
- // Keep the lock owner up to date.
- KThread* lock_owner = thread->GetLockOwner();
- if (lock_owner == nullptr) {
- return;
+ // If we removed the thread from some lock's waiting list, add it back.
+ if (lock_owner != nullptr) {
+ lock_owner->AddWaiterImpl(thread);
}
- // Update the thread in the lock owner's sorted list, and continue inheriting.
- lock_owner->RemoveWaiterImpl(thread);
- lock_owner->AddWaiterImpl(thread);
+ // Update the scheduler.
+ KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+ // Continue inheriting priority.
thread = lock_owner;
}
}
void KThread::AddWaiter(KThread* thread) {
- AddWaiterImpl(thread);
- RestorePriority(kernel, this);
+ this->AddWaiterImpl(thread);
+
+ // If the thread has a higher priority than us, we should inherit.
+ if (thread->GetPriority() < this->GetPriority()) {
+ RestorePriority(kernel, this);
+ }
}
void KThread::RemoveWaiter(KThread* thread) {
- RemoveWaiterImpl(thread);
- RestorePriority(kernel, this);
+ this->RemoveWaiterImpl(thread);
+
+ // If our priority is the same as the thread's (and we've inherited), we may need to restore to
+ // lower priority.
+ if (this->GetPriority() == thread->GetPriority() &&
+ this->GetPriority() < this->GetBasePriority()) {
+ RestorePriority(kernel, this);
+ }
}
-KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
- ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
+ ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
- s32 num_waiters{};
- KThread* next_lock_owner{};
- auto it = waiter_list.begin();
- while (it != waiter_list.end()) {
- if (it->GetAddressKey() == key) {
- KThread* thread = std::addressof(*it);
-
- // Keep track of how many kernel waiters we have.
- if (thread->GetAddressKeyIsKernel()) {
- ASSERT((num_kernel_waiters--) > 0);
- KScheduler::SetSchedulerUpdateNeeded(kernel);
- }
- it = waiter_list.erase(it);
+ // Get the relevant lock info.
+ auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
+ if (lock_info == nullptr) {
+ *out_has_waiters = false;
+ return nullptr;
+ }
- // Update the next lock owner.
- if (next_lock_owner == nullptr) {
- next_lock_owner = thread;
- next_lock_owner->SetLockOwner(nullptr);
- } else {
- next_lock_owner->AddWaiterImpl(thread);
- }
- num_waiters++;
- } else {
- it++;
+ // Remove the lock info from our held list.
+ held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+
+ // Keep track of how many kernel waiters we have.
+ if (lock_info->GetIsKernelAddressKey()) {
+ num_kernel_waiters -= lock_info->GetWaiterCount();
+ ASSERT(num_kernel_waiters >= 0);
+ KScheduler::SetSchedulerUpdateNeeded(kernel);
+ }
+
+ ASSERT(lock_info->GetWaiterCount() > 0);
+
+ // Remove the highest priority waiter from the lock to be the next owner.
+ KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter();
+ if (lock_info->RemoveWaiter(next_lock_owner)) {
+ // The new owner was the only waiter.
+ *out_has_waiters = false;
+
+ // Free the lock info, since it has no waiters.
+ LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+ } else {
+ // There are additional waiters on the lock.
+ *out_has_waiters = true;
+
+ // Add the lock to the new owner's held list.
+ next_lock_owner->AddHeldLock(lock_info);
+
+ // Keep track of any kernel waiters for the new owner.
+ if (lock_info->GetIsKernelAddressKey()) {
+ next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount();
+ ASSERT(next_lock_owner->num_kernel_waiters > 0);
+
+ // NOTE: No need to set scheduler update needed, because we will have already done so
+ // when removing earlier.
}
}
- // Do priority updates, if we have a next owner.
- if (next_lock_owner) {
+ // If our priority is the same as the next owner's (and we've inherited), we may need to restore
+ // to lower priority.
+ if (this->GetPriority() == next_lock_owner->GetPriority() &&
+ this->GetPriority() < this->GetBasePriority()) {
RestorePriority(kernel, this);
- RestorePriority(kernel, next_lock_owner);
+ // NOTE: No need to restore priority on the next lock owner, because it was already the
+ // highest priority waiter on the lock.
}
- // Return output.
- *out_num_waiters = num_waiters;
+ // Return the next lock owner.
return next_lock_owner;
}
@@ -1117,9 +1242,7 @@ ThreadState KThread::RequestTerminate() {
}
// Change the thread's priority to be higher than any system thread's.
- if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
- this->SetBasePriority(TerminatingThreadPriority);
- }
+ this->IncreaseBasePriority(TerminatingThreadPriority);
// If the thread is runnable, send a termination interrupt to other cores.
if (this->GetState() == ThreadState::Runnable) {
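
The reworked RestorePriority() walks the chain of lock owners: each thread's effective priority is recomputed from its base priority and the most urgent waiter across every lock it currently holds, and the change is then propagated to the owner of the lock that thread itself is waiting on. A simplified sketch of that walk (accessor names are assumptions; the real code also maintains red-black tree membership and scheduler state around each change):

    void PropagatePriority(KThread* thread) {
        while (thread != nullptr) {
            // Start from the thread's own base priority...
            s32 new_priority = thread->GetBasePriority();

            // ...and inherit from the highest-priority waiter on any held lock.
            for (const auto& lock : thread->HeldLocks()) {
                new_priority = std::min(new_priority,
                                        lock.GetHighestPriorityWaiter()->GetPriority());
            }

            // Once a thread's effective priority is already correct, stop.
            if (new_priority == thread->GetPriority()) {
                return;
            }
            thread->SetEffectivePriority(new_priority);

            // Continue up the chain: the owner of the lock this thread waits
            // on may now need to inherit the new priority as well.
            thread = thread->GetLockOwner();
        }
    }
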
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index ca82ce3b6..bd125f5f1 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -339,13 +339,7 @@ public:
void SetInterruptFlag();
void ClearInterruptFlag();
- [[nodiscard]] KThread* GetLockOwner() const {
- return lock_owner;
- }
-
- void SetLockOwner(KThread* owner) {
- lock_owner = owner;
- }
+ KThread* GetLockOwner() const;
[[nodiscard]] const KAffinityMask& GetAffinityMask() const {
return physical_affinity_mask;
@@ -434,6 +428,10 @@ public:
VAddr user_stack_top, s32 prio, s32 virt_core,
KProcess* owner);
+ [[nodiscard]] static Result InitializeServiceThread(Core::System& system, KThread* thread,
+ std::function<void()>&& thread_func,
+ s32 prio, s32 virt_core, KProcess* owner);
+
public:
struct StackParameters {
u8 svc_permission[0x10];
@@ -597,7 +595,13 @@ public:
[[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
- [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+ [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
+ return this->RemoveWaiterByKey(out_has_waiters, key, false);
+ }
+
+ [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
+ return this->RemoveWaiterByKey(out_has_waiters, key, true);
+ }
[[nodiscard]] VAddr GetAddressKey() const {
return address_key;
@@ -607,8 +611,8 @@ public:
return address_key_value;
}
- [[nodiscard]] bool GetAddressKeyIsKernel() const {
- return address_key_is_kernel;
+ [[nodiscard]] bool GetIsKernelAddressKey() const {
+ return is_kernel_address_key;
}
//! NB: intentional deviation from official kernel.
@@ -617,20 +621,17 @@ public:
// to cope with arbitrary host pointers making their way
// into things.
- void SetUserAddressKey(VAddr key) {
- address_key = key;
- address_key_is_kernel = false;
- }
-
void SetUserAddressKey(VAddr key, u32 val) {
+ ASSERT(waiting_lock_info == nullptr);
address_key = key;
address_key_value = val;
- address_key_is_kernel = false;
+ is_kernel_address_key = false;
}
void SetKernelAddressKey(VAddr key) {
+ ASSERT(waiting_lock_info == nullptr);
address_key = key;
- address_key_is_kernel = true;
+ is_kernel_address_key = true;
}
void ClearWaitQueue() {
@@ -642,10 +643,6 @@ public:
void EndWait(Result wait_result_);
void CancelWait(Result wait_result_, bool cancel_timer_task);
- [[nodiscard]] bool HasWaiters() const {
- return !waiter_list.empty();
- }
-
[[nodiscard]] s32 GetNumKernelWaiters() const {
return num_kernel_waiters;
}
@@ -675,6 +672,9 @@ public:
}
private:
+ [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key,
+ bool is_kernel_address_key);
+
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
@@ -718,13 +718,14 @@ private:
};
void AddWaiterImpl(KThread* thread);
-
void RemoveWaiterImpl(KThread* thread);
+ static void RestorePriority(KernelCore& kernel, KThread* thread);
void StartTermination();
-
void FinishTermination();
+ void IncreaseBasePriority(s32 priority);
+
[[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
@@ -733,8 +734,6 @@ private:
s32 core, KProcess* owner, ThreadType type,
std::function<void()>&& init_func);
- static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
-
// For core KThread implementation
ThreadContext32 thread_context_32{};
ThreadContext64 thread_context_64{};
@@ -745,6 +744,127 @@ private:
&KThread::condvar_arbiter_tree_node>;
using ConditionVariableThreadTree =
ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+
+private:
+ struct LockWithPriorityInheritanceComparator {
+ struct RedBlackKeyType {
+ s32 m_priority;
+
+ constexpr s32 GetPriority() const {
+ return m_priority;
+ }
+ };
+
+ template <typename T>
+ requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+ static constexpr int Compare(const T& lhs, const KThread& rhs) {
+ if (lhs.GetPriority() < rhs.GetPriority()) {
+ // Sort by priority.
+ return -1;
+ } else {
+ return 1;
+ }
+ }
+ };
+ static_assert(std::same_as<Common::RedBlackKeyType<LockWithPriorityInheritanceComparator, void>,
+ LockWithPriorityInheritanceComparator::RedBlackKeyType>);
+
+ using LockWithPriorityInheritanceThreadTreeTraits =
+ Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+ &KThread::condvar_arbiter_tree_node>;
+ using LockWithPriorityInheritanceThreadTree =
+ ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
+
+public:
+ class LockWithPriorityInheritanceInfo : public KSlabAllocated<LockWithPriorityInheritanceInfo>,
+ public boost::intrusive::list_base_hook<> {
+ public:
+ explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
+
+ static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
+ bool is_kernel_address_key) {
+ // Create a new lock info.
+ auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
+ ASSERT(new_lock != nullptr);
+
+ // Set the new lock's address key.
+ new_lock->m_address_key = address_key;
+ new_lock->m_is_kernel_address_key = is_kernel_address_key;
+
+ return new_lock;
+ }
+
+ void SetOwner(KThread* new_owner) {
+ // Set new owner.
+ m_owner = new_owner;
+ }
+
+ void AddWaiter(KThread* waiter) {
+ // Insert the waiter.
+ m_tree.insert(*waiter);
+ m_waiter_count++;
+
+ waiter->SetWaitingLockInfo(this);
+ }
+
+ [[nodiscard]] bool RemoveWaiter(KThread* waiter) {
+ m_tree.erase(m_tree.iterator_to(*waiter));
+
+ waiter->SetWaitingLockInfo(nullptr);
+
+ return (--m_waiter_count) == 0;
+ }
+
+ KThread* GetHighestPriorityWaiter() {
+ return std::addressof(m_tree.front());
+ }
+ const KThread* GetHighestPriorityWaiter() const {
+ return std::addressof(m_tree.front());
+ }
+
+ LockWithPriorityInheritanceThreadTree& GetThreadTree() {
+ return m_tree;
+ }
+ const LockWithPriorityInheritanceThreadTree& GetThreadTree() const {
+ return m_tree;
+ }
+
+ VAddr GetAddressKey() const {
+ return m_address_key;
+ }
+ bool GetIsKernelAddressKey() const {
+ return m_is_kernel_address_key;
+ }
+ KThread* GetOwner() const {
+ return m_owner;
+ }
+ u32 GetWaiterCount() const {
+ return m_waiter_count;
+ }
+
+ private:
+ LockWithPriorityInheritanceThreadTree m_tree{};
+ VAddr m_address_key{};
+ KThread* m_owner{};
+ u32 m_waiter_count{};
+ bool m_is_kernel_address_key{};
+ };
+
+ void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
+ waiting_lock_info = lock;
+ }
+
+ LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
+ return waiting_lock_info;
+ }
+
+ void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
+ LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key, bool is_kernel_address_key);
+
+private:
+ using LockWithPriorityInheritanceInfoList =
+ boost::intrusive::list<LockWithPriorityInheritanceInfo>;
+
ConditionVariableThreadTree* condvar_tree{};
u64 condvar_key{};
u64 virtual_affinity_mask{};
@@ -761,9 +881,9 @@ private:
s64 last_scheduled_tick{};
std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
KThreadQueue* wait_queue{};
- WaiterList waiter_list{};
+ LockWithPriorityInheritanceInfoList held_lock_info_list{};
+ LockWithPriorityInheritanceInfo* waiting_lock_info{};
WaiterList pinned_waiter_list{};
- KThread* lock_owner{};
u32 address_key_value{};
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
@@ -787,7 +907,7 @@ private:
bool debug_attached{};
s8 priority_inheritance_count{};
bool resource_limit_release_hint{};
- bool address_key_is_kernel{};
+ bool is_kernel_address_key{};
StackParameters stack_parameters{};
Common::SpinLock context_guard{};
@@ -810,10 +930,12 @@ public:
void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
u32 value) {
+ ASSERT(waiting_lock_info == nullptr);
condvar_tree = tree;
condvar_key = cv_key;
address_key = address;
address_key_value = value;
+ is_kernel_address_key = false;
}
void ClearConditionVariable() {
@@ -825,6 +947,7 @@ public:
}
void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
+ ASSERT(waiting_lock_info == nullptr);
condvar_tree = tree;
condvar_key = address;
}
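
The lock info objects are slab-allocated and live only while a lock is contended: created when the first waiter arrives, moved between owners' held-lock lists on ownership transfer, and freed once the last waiter leaves. A hedged lifecycle sketch using the members declared above (in the real code every step runs under the scheduler lock and the object is also linked into the owning thread's held-lock list):

    void LockInfoLifecycleExample(Kernel::KernelCore& kernel, Kernel::KThread* waiter,
                                  VAddr address_key) {
        using LockInfo = Kernel::KThread::LockWithPriorityInheritanceInfo;

        // First contention on this address key: allocate a lock info from the slab.
        LockInfo* lock_info = LockInfo::Create(kernel, address_key,
                                               /*is_kernel_address_key=*/false);

        // Track the contending thread; AddWaiter also records the lock on the waiter.
        lock_info->AddWaiter(waiter);

        // When the last waiter is removed, RemoveWaiter() returns true and the
        // object is returned to its slab heap.
        if (lock_info->RemoveWaiter(waiter)) {
            LockInfo::Free(kernel, lock_info);
        }
    }
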
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 2ff253183..ef7057ff7 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -34,14 +34,15 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_worker_task_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
-#include "core/hle/kernel/service_thread.h"
#include "core/hle/result.h"
+#include "core/hle/service/server_manager.h"
#include "core/hle/service/sm/sm.h"
#include "core/memory.h"
@@ -55,9 +56,7 @@ struct KernelCore::Impl {
static constexpr size_t BlockInfoSlabHeapSize = 4000;
static constexpr size_t ReservedDynamicPageCount = 64;
- explicit Impl(Core::System& system_, KernelCore& kernel_)
- : service_threads_manager{1, "ServiceThreadsManager"},
- service_thread_barrier{2}, system{system_} {}
+ explicit Impl(Core::System& system_, KernelCore& kernel_) : system{system_} {}
void SetMulticore(bool is_multi) {
is_multicore = is_multi;
@@ -98,8 +97,6 @@ struct KernelCore::Impl {
InitializeHackSharedMemory();
RegisterHostThread(nullptr);
-
- default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread");
}
void InitializeCores() {
@@ -140,11 +137,6 @@ struct KernelCore::Impl {
preemption_event = nullptr;
- for (auto& iter : named_ports) {
- iter.second->Close();
- }
- named_ports.clear();
-
exclusive_monitor.reset();
// Cleanup persistent kernel objects
@@ -207,8 +199,9 @@ struct KernelCore::Impl {
}
void CloseServices() {
- // Ensures all service threads gracefully shutdown.
- ClearServiceThreads();
+        // Ensure that all servers shut down gracefully.
+ std::scoped_lock lk{server_lock};
+ server_managers.clear();
}
void InitializePhysicalCores() {
@@ -761,55 +754,6 @@ struct KernelCore::Impl {
"HidBus:SharedMemory");
}
- KClientPort* CreateNamedServicePort(std::string name) {
- auto search = service_interface_factory.find(name);
- if (search == service_interface_factory.end()) {
- UNIMPLEMENTED();
- return {};
- }
-
- return &search->second(system.ServiceManager(), system);
- }
-
- void RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- auto search = service_interface_handlers.find(name);
- if (search == service_interface_handlers.end()) {
- return;
- }
-
- search->second(system.ServiceManager(), server_port);
- }
-
- Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) {
- auto* ptr = new ServiceThread(kernel, name);
-
- service_threads_manager.QueueWork(
- [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); });
-
- return *ptr;
- }
-
- void ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- auto* ptr = &service_thread;
-
- if (ptr == default_service_thread) {
- // Nothing to do here, the service is using default_service_thread, which will be
- // released on shutdown.
- return;
- }
-
- service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); });
- }
-
- void ClearServiceThreads() {
- service_threads_manager.QueueWork([this] {
- service_threads.clear();
- default_service_thread = nullptr;
- service_thread_barrier.Sync();
- });
- service_thread_barrier.Sync();
- }
-
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
@@ -839,14 +783,12 @@ struct KernelCore::Impl {
std::unique_ptr<KObjectNameGlobalData> object_name_global_data;
- /// Map of named ports managed by the kernel, which can be retrieved using
- /// the ConnectToPort SVC.
- std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
- std::unordered_map<std::string, ServiceInterfaceHandlerFn> service_interface_handlers;
- NamedPortTable named_ports;
std::unordered_set<KAutoObject*> registered_objects;
std::unordered_set<KAutoObject*> registered_in_use_objects;
+ std::mutex server_lock;
+ std::vector<std::unique_ptr<Service::ServerManager>> server_managers;
+
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;
@@ -881,12 +823,6 @@ struct KernelCore::Impl {
// Memory layout
std::unique_ptr<KMemoryLayout> memory_layout;
- // Threads used for services
- std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads;
- ServiceThread* default_service_thread{};
- Common::ThreadWorker service_threads_manager;
- Common::Barrier service_thread_barrier;
-
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -1050,23 +986,6 @@ void KernelCore::PrepareReschedule(std::size_t id) {
    // TODO: Reimplement this
}
-void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory) {
- impl->service_interface_factory.emplace(std::move(name), factory);
-}
-
-void KernelCore::RegisterInterfaceForNamedService(std::string name,
- ServiceInterfaceHandlerFn&& handler) {
- impl->service_interface_handlers.emplace(std::move(name), handler);
-}
-
-KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
- return impl->CreateNamedServicePort(std::move(name));
-}
-
-void KernelCore::RegisterNamedServiceHandler(std::string name, KServerPort* server_port) {
- impl->RegisterNamedServiceHandler(std::move(name), server_port);
-}
-
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::scoped_lock lk{impl->registered_objects_lock};
impl->registered_objects.insert(object);
@@ -1087,8 +1006,19 @@ void KernelCore::UnregisterInUseObject(KAutoObject* object) {
impl->registered_in_use_objects.erase(object);
}
-bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {
- return port != impl->named_ports.cend();
+void KernelCore::RunServer(std::unique_ptr<Service::ServerManager>&& server_manager) {
+ auto* manager = server_manager.get();
+
+ {
+ std::scoped_lock lk{impl->server_lock};
+ if (impl->is_shutting_down) {
+ return;
+ }
+
+ impl->server_managers.emplace_back(std::move(server_manager));
+ }
+
+ manager->LoopProcess();
}
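
RunServer() takes ownership of the manager before entering its processing loop on the calling thread, so the manager outlives any sessions it serves and is torn down centrally in CloseServices(). A hedged sketch of how a service loop would hand itself to the kernel (construction and registration details are illustrative, not the exact service-layer API):

    void LoopProcessExampleService(Core::System& system) {
        // Build the server manager that will own this service's ports/sessions.
        auto server_manager = std::make_unique<Service::ServerManager>(system);

        // Port/session registration would happen here, e.g.:
        // server_manager->RegisterNamedService("example:svc", ...);

        // Hand ownership to the kernel; LoopProcess() runs on this thread
        // until emulation shuts down.
        system.Kernel().RunServer(std::move(server_manager));
    }
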
u32 KernelCore::CreateNewObjectID() {
@@ -1127,6 +1057,87 @@ void KernelCore::RegisterHostThread(KThread* existing_thread) {
}
}
+static std::jthread RunHostThreadFunc(KernelCore& kernel, KProcess* process,
+ std::string&& thread_name, std::function<void()>&& func) {
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(kernel);
+ ASSERT(R_SUCCEEDED(KThread::InitializeDummyThread(thread, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ return std::jthread(
+ [&kernel, thread, thread_name{std::move(thread_name)}, func{std::move(func)}] {
+ // Set the thread name.
+ Common::SetCurrentThreadName(thread_name.c_str());
+
+ // Register the thread.
+ kernel.RegisterHostThread(thread);
+
+ // Run the callback.
+ func();
+
+ // Close the thread.
+ // This will free the process if it is the last reference.
+ thread->Close();
+ });
+}
+
+std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
+ std::function<void()> func) {
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(process_name), std::move(func));
+}
+
+std::jthread KernelCore::RunOnHostCoreThread(std::string&& thread_name,
+ std::function<void()> func) {
+ // Get the current process.
+ KProcess* process = GetCurrentProcessPointer(*this);
+
+ // Run the host thread.
+ return RunHostThreadFunc(*this, process, std::move(thread_name), std::move(func));
+}
+
+void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function<void()> func) {
+ constexpr s32 ServiceThreadPriority = 16;
+ constexpr s32 ServiceThreadCore = 3;
+
+ // Make a new process.
+ KProcess* process = KProcess::Create(*this);
+ ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland,
+ GetSystemResourceLimit())));
+
+ // Ensure that we don't hold onto any extra references.
+ SCOPE_EXIT({ process->Close(); });
+
+ // Reserve a new thread from the process resource limit.
+ KScopedResourceReservation thread_reservation(process, LimitableResource::ThreadCountMax);
+ ASSERT(thread_reservation.Succeeded());
+
+ // Initialize the thread.
+ KThread* thread = KThread::Create(*this);
+ ASSERT(R_SUCCEEDED(KThread::InitializeServiceThread(
+ System(), thread, std::move(func), ServiceThreadPriority, ServiceThreadCore, process)));
+
+ // Commit the thread reservation.
+ thread_reservation.Commit();
+
+ // Begin running the thread.
+ ASSERT(R_SUCCEEDED(thread->Run()));
+}
+
u32 KernelCore::GetCurrentHostThreadID() const {
return impl->GetCurrentHostThreadID();
}
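
These helpers replace the old ServiceThread machinery: host-side HLE work gets a registered dummy KThread (and, for the process variants, its own KProcess), while RunOnGuestCoreProcess schedules the callback as a real guest thread on core 3. A hedged usage sketch for the host variant:

    // Sketch: run an HLE worker on a host thread that the kernel knows about,
    // so GetCurrentEmuThread() and resource accounting behave as expected.
    std::jthread StartExampleWorker(Kernel::KernelCore& kernel) {
        return kernel.RunOnHostCoreProcess("ExampleWorker", [] {
            // Host-side service work goes here; the surrounding thread owns a
            // fresh KProcess and has a dummy KThread registered for it.
        });
    }
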
@@ -1271,18 +1282,6 @@ void KernelCore::ExitSVCProfile() {
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
}
-Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) {
- return impl->CreateServiceThread(*this, name);
-}
-
-Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const {
- return *impl->default_service_thread;
-}
-
-void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) {
- impl->ReleaseServiceThread(service_thread);
-}
-
Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() {
return impl->slab_resource_counts;
}
@@ -1319,4 +1318,97 @@ const Core::System& KernelCore::System() const {
return impl->system;
}
+struct KernelCore::SlabHeapContainer {
+ KSlabHeap<KClientSession> client_session;
+ KSlabHeap<KEvent> event;
+ KSlabHeap<KLinkedListNode> linked_list_node;
+ KSlabHeap<KPort> port;
+ KSlabHeap<KProcess> process;
+ KSlabHeap<KResourceLimit> resource_limit;
+ KSlabHeap<KSession> session;
+ KSlabHeap<KSharedMemory> shared_memory;
+ KSlabHeap<KSharedMemoryInfo> shared_memory_info;
+ KSlabHeap<KThread> thread;
+ KSlabHeap<KTransferMemory> transfer_memory;
+ KSlabHeap<KCodeMemory> code_memory;
+ KSlabHeap<KDeviceAddressSpace> device_address_space;
+ KSlabHeap<KPageBuffer> page_buffer;
+ KSlabHeap<KThreadLocalPage> thread_local_page;
+ KSlabHeap<KObjectName> object_name;
+ KSlabHeap<KSessionRequest> session_request;
+ KSlabHeap<KSecureSystemResource> secure_system_resource;
+ KSlabHeap<KThread::LockWithPriorityInheritanceInfo> lock_info;
+ KSlabHeap<KEventInfo> event_info;
+ KSlabHeap<KDebug> debug;
+};
+
+template <typename T>
+KSlabHeap<T>& KernelCore::SlabHeap() {
+ if constexpr (std::is_same_v<T, KClientSession>) {
+ return slab_heap_container->client_session;
+ } else if constexpr (std::is_same_v<T, KEvent>) {
+ return slab_heap_container->event;
+ } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
+ return slab_heap_container->linked_list_node;
+ } else if constexpr (std::is_same_v<T, KPort>) {
+ return slab_heap_container->port;
+ } else if constexpr (std::is_same_v<T, KProcess>) {
+ return slab_heap_container->process;
+ } else if constexpr (std::is_same_v<T, KResourceLimit>) {
+ return slab_heap_container->resource_limit;
+ } else if constexpr (std::is_same_v<T, KSession>) {
+ return slab_heap_container->session;
+ } else if constexpr (std::is_same_v<T, KSharedMemory>) {
+ return slab_heap_container->shared_memory;
+ } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
+ return slab_heap_container->shared_memory_info;
+ } else if constexpr (std::is_same_v<T, KThread>) {
+ return slab_heap_container->thread;
+ } else if constexpr (std::is_same_v<T, KTransferMemory>) {
+ return slab_heap_container->transfer_memory;
+ } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+ return slab_heap_container->code_memory;
+ } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+ return slab_heap_container->device_address_space;
+ } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+ return slab_heap_container->page_buffer;
+ } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+ return slab_heap_container->thread_local_page;
+ } else if constexpr (std::is_same_v<T, KObjectName>) {
+ return slab_heap_container->object_name;
+ } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+ return slab_heap_container->session_request;
+ } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+ return slab_heap_container->secure_system_resource;
+ } else if constexpr (std::is_same_v<T, KThread::LockWithPriorityInheritanceInfo>) {
+ return slab_heap_container->lock_info;
+ } else if constexpr (std::is_same_v<T, KEventInfo>) {
+ return slab_heap_container->event_info;
+ } else if constexpr (std::is_same_v<T, KDebug>) {
+ return slab_heap_container->debug;
+ }
+}
+
+template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
+template KSlabHeap<KEvent>& KernelCore::SlabHeap();
+template KSlabHeap<KLinkedListNode>& KernelCore::SlabHeap();
+template KSlabHeap<KPort>& KernelCore::SlabHeap();
+template KSlabHeap<KProcess>& KernelCore::SlabHeap();
+template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
+template KSlabHeap<KSession>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemoryInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KThread>& KernelCore::SlabHeap();
+template KSlabHeap<KTransferMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KCodeMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KDeviceAddressSpace>& KernelCore::SlabHeap();
+template KSlabHeap<KPageBuffer>& KernelCore::SlabHeap();
+template KSlabHeap<KThreadLocalPage>& KernelCore::SlabHeap();
+template KSlabHeap<KObjectName>& KernelCore::SlabHeap();
+template KSlabHeap<KSessionRequest>& KernelCore::SlabHeap();
+template KSlabHeap<KSecureSystemResource>& KernelCore::SlabHeap();
+template KSlabHeap<KThread::LockWithPriorityInheritanceInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KEventInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KDebug>& KernelCore::SlabHeap();
+
} // namespace Kernel
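
Moving the SlabHeap() body out of the header works because kernel.cpp explicitly instantiates the template for every object type it is used with, so other translation units link against those instantiations and only need the declaration. A minimal, generic sketch of the pattern (names are illustrative):

    // widget.h -- declaration only; callers include just this.
    template <typename T>
    T& GetWidget();

    // widget.cpp -- definition plus explicit instantiations for every supported T.
    template <typename T>
    T& GetWidget() {
        static T instance{};
        return instance;
    }

    template int& GetWidget<int>();
    template double& GetWidget<double>();
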
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 6e0668f7f..1b380a07b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -9,6 +9,8 @@
#include <string>
#include <unordered_map>
#include <vector>
+
+#include "common/polyfill_thread.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_slab_heap.h"
@@ -24,6 +26,10 @@ class CoreTiming;
struct EventType;
} // namespace Core::Timing
+namespace Service {
+class ServerManager;
+}
+
namespace Service::SM {
class ServiceManager;
}
@@ -65,13 +71,6 @@ class KTransferMemory;
class KWorkerTaskManager;
class KCodeMemory;
class PhysicalCore;
-class ServiceThread;
-class Synchronization;
-
-using ServiceInterfaceFactory =
- std::function<KClientPort&(Service::SM::ServiceManager&, Core::System&)>;
-
-using ServiceInterfaceHandlerFn = std::function<void(Service::SM::ServiceManager&, KServerPort*)>;
namespace Init {
struct KSlabResourceCounts;
@@ -80,15 +79,8 @@ struct KSlabResourceCounts;
template <typename T>
class KSlabHeap;
-using EmuThreadHandle = uintptr_t;
-constexpr EmuThreadHandle EmuThreadHandleInvalid{};
-constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
-
/// Represents a single instance of the kernel.
class KernelCore {
-private:
- using NamedPortTable = std::unordered_map<std::string, KClientPort*>;
-
public:
/// Constructs an instance of the kernel using the given System
/// instance as a context for any necessary system-related state,
@@ -196,18 +188,6 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
- /// Registers a named HLE service, passing a factory used to open a port to that service.
- void RegisterNamedService(std::string name, ServiceInterfaceFactory&& factory);
-
- /// Registers a setup function for the named HLE service.
- void RegisterInterfaceForNamedService(std::string name, ServiceInterfaceHandlerFn&& handler);
-
- /// Opens a port to a service previously registered with RegisterNamedService.
- KClientPort* CreateNamedServicePort(std::string name);
-
- /// Accepts a session on a port created by CreateNamedServicePort.
- void RegisterNamedServiceHandler(std::string name, KServerPort* server_port);
-
/// Registers all kernel objects with the global emulation state, this is purely for tracking
/// leaks after emulation has been shutdown.
void RegisterKernelObject(KAutoObject* object);
@@ -224,8 +204,8 @@ public:
/// destroyed during the current emulation session.
void UnregisterInUseObject(KAutoObject* object);
- /// Determines whether or not the given port is a valid named port.
- bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
+ // Runs the given server manager until shutdown.
+ void RunServer(std::unique_ptr<Service::ServerManager>&& server_manager);
/// Gets the current host_thread/guest_thread pointer.
KThread* GetCurrentEmuThread() const;
@@ -242,6 +222,12 @@ public:
/// Register the current thread as a non CPU core thread.
void RegisterHostThread(KThread* existing_thread = nullptr);
+ void RunOnGuestCoreProcess(std::string&& process_name, std::function<void()> func);
+
+ std::jthread RunOnHostCoreProcess(std::string&& process_name, std::function<void()> func);
+
+ std::jthread RunOnHostCoreThread(std::string&& thread_name, std::function<void()> func);
+
/// Gets global data for KObjectName.
KObjectNameGlobalData& ObjectNameGlobalData();
@@ -310,33 +296,6 @@ public:
void ExitSVCProfile();
- /**
- * Creates a host thread to execute HLE service requests, which are used to execute service
- * routines asynchronously. While these are allocated per ServerSession, these need to be owned
- * and managed outside of ServerSession to avoid a circular dependency. In general, most
- * services can just use the default service thread, and not need their own host service thread.
- * See GetDefaultServiceThread.
- * @param name String name for the ServerSession creating this thread, used for debug
- * purposes.
- * @returns A reference to the newly created service thread.
- */
- Kernel::ServiceThread& CreateServiceThread(const std::string& name);
-
- /**
- * Gets the default host service thread, which executes HLE service requests. Unless service
- * requests need to block on the host, the default service thread should be used in favor of
- * creating a new service thread.
- * @returns A reference to the default service thread.
- */
- Kernel::ServiceThread& GetDefaultServiceThread() const;
-
- /**
- * Releases a HLE service thread, instructing KernelCore to free it. This should be called when
- * the ServerSession associated with the thread is destroyed.
- * @param service_thread Service thread to release.
- */
- void ReleaseServiceThread(Kernel::ServiceThread& service_thread);
-
/// Workaround for single-core mode when preempting threads while idle.
bool IsPhantomModeForSingleCore() const;
void SetIsPhantomModeForSingleCore(bool value);
@@ -346,49 +305,7 @@ public:
/// Gets the slab heap for the specified kernel object type.
template <typename T>
- KSlabHeap<T>& SlabHeap() {
- if constexpr (std::is_same_v<T, KClientSession>) {
- return slab_heap_container->client_session;
- } else if constexpr (std::is_same_v<T, KEvent>) {
- return slab_heap_container->event;
- } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
- return slab_heap_container->linked_list_node;
- } else if constexpr (std::is_same_v<T, KPort>) {
- return slab_heap_container->port;
- } else if constexpr (std::is_same_v<T, KProcess>) {
- return slab_heap_container->process;
- } else if constexpr (std::is_same_v<T, KResourceLimit>) {
- return slab_heap_container->resource_limit;
- } else if constexpr (std::is_same_v<T, KSession>) {
- return slab_heap_container->session;
- } else if constexpr (std::is_same_v<T, KSharedMemory>) {
- return slab_heap_container->shared_memory;
- } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
- return slab_heap_container->shared_memory_info;
- } else if constexpr (std::is_same_v<T, KThread>) {
- return slab_heap_container->thread;
- } else if constexpr (std::is_same_v<T, KTransferMemory>) {
- return slab_heap_container->transfer_memory;
- } else if constexpr (std::is_same_v<T, KCodeMemory>) {
- return slab_heap_container->code_memory;
- } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
- return slab_heap_container->device_address_space;
- } else if constexpr (std::is_same_v<T, KPageBuffer>) {
- return slab_heap_container->page_buffer;
- } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
- return slab_heap_container->thread_local_page;
- } else if constexpr (std::is_same_v<T, KObjectName>) {
- return slab_heap_container->object_name;
- } else if constexpr (std::is_same_v<T, KSessionRequest>) {
- return slab_heap_container->session_request;
- } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
- return slab_heap_container->secure_system_resource;
- } else if constexpr (std::is_same_v<T, KEventInfo>) {
- return slab_heap_container->event_info;
- } else if constexpr (std::is_same_v<T, KDebug>) {
- return slab_heap_container->debug;
- }
- }
+ KSlabHeap<T>& SlabHeap();
/// Gets the current slab resource counts.
Init::KSlabResourceCounts& SlabResourceCounts();
@@ -434,28 +351,7 @@ private:
private:
/// Helper to encapsulate all slab heaps in a single heap allocated container
- struct SlabHeapContainer {
- KSlabHeap<KClientSession> client_session;
- KSlabHeap<KEvent> event;
- KSlabHeap<KLinkedListNode> linked_list_node;
- KSlabHeap<KPort> port;
- KSlabHeap<KProcess> process;
- KSlabHeap<KResourceLimit> resource_limit;
- KSlabHeap<KSession> session;
- KSlabHeap<KSharedMemory> shared_memory;
- KSlabHeap<KSharedMemoryInfo> shared_memory_info;
- KSlabHeap<KThread> thread;
- KSlabHeap<KTransferMemory> transfer_memory;
- KSlabHeap<KCodeMemory> code_memory;
- KSlabHeap<KDeviceAddressSpace> device_address_space;
- KSlabHeap<KPageBuffer> page_buffer;
- KSlabHeap<KThreadLocalPage> thread_local_page;
- KSlabHeap<KObjectName> object_name;
- KSlabHeap<KSessionRequest> session_request;
- KSlabHeap<KSecureSystemResource> secure_system_resource;
- KSlabHeap<KEventInfo> event_info;
- KSlabHeap<KDebug> debug;
- };
+ struct SlabHeapContainer;
std::unique_ptr<SlabHeapContainer> slab_heap_container;
};
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
deleted file mode 100644
index 38afa720b..000000000
--- a/src/core/hle/kernel/service_thread.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <functional>
-#include <map>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-#include "common/polyfill_thread.h"
-#include "common/scope_exit.h"
-#include "common/thread.h"
-#include "core/hle/ipc_helpers.h"
-#include "core/hle/kernel/hle_ipc.h"
-#include "core/hle/kernel/k_event.h"
-#include "core/hle/kernel/k_scoped_resource_reservation.h"
-#include "core/hle/kernel/k_session.h"
-#include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/service_thread.h"
-
-namespace Kernel {
-
-class ServiceThread::Impl final {
-public:
- explicit Impl(KernelCore& kernel, const std::string& service_name);
- ~Impl();
-
- void WaitAndProcessImpl();
- void SessionClosed(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager);
- void LoopProcess();
-
- void RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager);
-
-private:
- KernelCore& kernel;
- const std::string m_service_name;
-
- std::jthread m_host_thread{};
- std::mutex m_session_mutex{};
- std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions{};
- KEvent* m_wakeup_event{};
- KThread* m_thread{};
- std::atomic<bool> m_shutdown_requested{};
-};
-
-void ServiceThread::Impl::WaitAndProcessImpl() {
- // Create local list of waitable sessions.
- std::vector<KSynchronizationObject*> objs;
- std::vector<std::shared_ptr<SessionRequestManager>> managers;
-
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Reserve the needed quantity.
- objs.reserve(m_sessions.size() + 1);
- managers.reserve(m_sessions.size());
-
- // Copy to our local list.
- for (const auto& [session, manager] : m_sessions) {
- objs.push_back(session);
- managers.push_back(manager);
- }
-
- // Insert the wakeup event at the end.
- objs.push_back(&m_wakeup_event->GetReadableEvent());
- }
-
- // Wait on the list of sessions.
- s32 index{-1};
- Result rc = KSynchronizationObject::Wait(kernel, &index, objs.data(),
- static_cast<s32>(objs.size()), -1);
- ASSERT(!rc.IsFailure());
-
- // If this was the wakeup event, clear it and finish.
- if (index >= static_cast<s64>(objs.size() - 1)) {
- m_wakeup_event->Clear();
- return;
- }
-
- // This event is from a server session.
- auto* server_session = static_cast<KServerSession*>(objs[index]);
- auto& manager = managers[index];
-
- // Fetch the HLE request context.
- std::shared_ptr<HLERequestContext> context;
- rc = server_session->ReceiveRequest(&context, manager);
-
- // If the session was closed, handle that.
- if (rc == ResultSessionClosed) {
- SessionClosed(server_session, manager);
-
- // Finish.
- return;
- }
-
- // TODO: handle other cases
- ASSERT(rc == ResultSuccess);
-
- // Perform the request.
- Result service_rc = manager->CompleteSyncRequest(server_session, *context);
-
- // Reply to the client.
- rc = server_session->SendReplyHLE();
-
- if (rc == ResultSessionClosed || service_rc == IPC::ERR_REMOTE_PROCESS_DEAD) {
- SessionClosed(server_session, manager);
- return;
- }
-
- // TODO: handle other cases
- ASSERT(rc == ResultSuccess);
- ASSERT(service_rc == ResultSuccess);
-}
-
-void ServiceThread::Impl::SessionClosed(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Erase the session.
- ASSERT(m_sessions.erase(server_session) == 1);
- }
-
- // Close our reference to the server session.
- server_session->Close();
-}
-
-void ServiceThread::Impl::LoopProcess() {
- Common::SetCurrentThreadName(m_service_name.c_str());
-
- kernel.RegisterHostThread(m_thread);
-
- while (!m_shutdown_requested.load()) {
- WaitAndProcessImpl();
- }
-}
-
-void ServiceThread::Impl::RegisterServerSession(KServerSession* server_session,
- std::shared_ptr<SessionRequestManager> manager) {
- // Open the server session.
- server_session->Open();
-
- {
- // Lock to get the set.
- std::scoped_lock lk{m_session_mutex};
-
- // Insert the session and manager.
- m_sessions[server_session] = manager;
- }
-
- // Signal the wakeup event.
- m_wakeup_event->Signal();
-}
-
-ServiceThread::Impl::~Impl() {
- // Shut down the processing thread.
- m_shutdown_requested.store(true);
- m_wakeup_event->Signal();
- m_host_thread.join();
-
- // Close all remaining sessions.
- for (const auto& [server_session, manager] : m_sessions) {
- server_session->Close();
- }
-
- // Destroy remaining managers.
- m_sessions.clear();
-
- // Close event.
- m_wakeup_event->GetReadableEvent().Close();
- m_wakeup_event->Close();
-
- // Close thread.
- m_thread->Close();
-}
-
-ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name)
- : kernel{kernel_}, m_service_name{service_name} {
- // Initialize event.
- m_wakeup_event = KEvent::Create(kernel);
- m_wakeup_event->Initialize(nullptr);
-
- // Initialize thread.
- m_thread = KThread::Create(kernel);
- ASSERT(KThread::InitializeDummyThread(m_thread, nullptr).IsSuccess());
-
- // Start thread.
- m_host_thread = std::jthread([this] { LoopProcess(); });
-}
-
-ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name)
- : impl{std::make_unique<Impl>(kernel, name)} {}
-
-ServiceThread::~ServiceThread() = default;
-
-void ServiceThread::RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager) {
- impl->RegisterServerSession(session, manager);
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
deleted file mode 100644
index fb4325531..000000000
--- a/src/core/hle/kernel/service_thread.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <memory>
-#include <string>
-
-namespace Kernel {
-
-class HLERequestContext;
-class KernelCore;
-class KSession;
-class SessionRequestManager;
-
-class ServiceThread final {
-public:
- explicit ServiceThread(KernelCore& kernel, const std::string& name);
- ~ServiceThread();
-
- void RegisterServerSession(KServerSession* session,
- std::shared_ptr<SessionRequestManager> manager);
-
-private:
- class Impl;
- std::unique_ptr<Impl> impl;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index 58dc47508..cbed4dc8c 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -126,6 +126,11 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
*result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
return ResultSuccess;
+ case InfoType::IsApplication:
+ LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
+ *result = true;
+ return ResultSuccess;
+
case InfoType::FreeThreadCount:
*result = process->GetFreeThreadCount();
return ResultSuccess;
diff --git a/src/core/hle/kernel/svc/svc_port.cpp b/src/core/hle/kernel/svc/svc_port.cpp
index 0b5b4ba2b..78c2a8d17 100644
--- a/src/core/hle/kernel/svc/svc_port.cpp
+++ b/src/core/hle/kernel/svc/svc_port.cpp
@@ -12,56 +12,40 @@
namespace Kernel::Svc {
-/// Connect to an OS service given the port name, returns the handle to the port to out
-Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) {
- auto& memory = system.Memory();
- if (!memory.IsValidVirtualAddress(port_name_address)) {
- LOG_ERROR(Kernel_SVC,
- "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}",
- port_name_address);
- return ResultNotFound;
- }
+Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr user_name) {
+ // Copy the provided name from user memory to kernel memory.
+ auto string_name = system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);
- static constexpr std::size_t PortNameMaxLength = 11;
- // Read 1 char beyond the max allowed port name to detect names that are too long.
- const std::string port_name = memory.ReadCString(port_name_address, PortNameMaxLength + 1);
- if (port_name.size() > PortNameMaxLength) {
- LOG_ERROR(Kernel_SVC, "Port name is too long, expected {} but got {}", PortNameMaxLength,
- port_name.size());
- return ResultOutOfRange;
- }
+ std::array<char, KObjectName::NameLengthMax> name{};
+ std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);
- LOG_TRACE(Kernel_SVC, "called port_name={}", port_name);
+    // Ensure the copied name is null-terminated within the length limit.
+ R_UNLESS(name[sizeof(name) - 1] == '\x00', ResultOutOfRange);
// Get the current handle table.
- auto& kernel = system.Kernel();
- auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
+ auto& handle_table = GetCurrentProcess(system.Kernel()).GetHandleTable();
// Find the client port.
- auto port = kernel.CreateNamedServicePort(port_name);
- if (!port) {
- LOG_ERROR(Kernel_SVC, "tried to connect to unknown port: {}", port_name);
- return ResultNotFound;
- }
+ auto port = KObjectName::Find<KClientPort>(system.Kernel(), name.data());
+ R_UNLESS(port.IsNotNull(), ResultNotFound);
// Reserve a handle for the port.
// NOTE: Nintendo really does write directly to the output handle here.
R_TRY(handle_table.Reserve(out));
- auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });
+ ON_RESULT_FAILURE {
+ handle_table.Unreserve(*out);
+ };
// Create a session.
- KClientSession* session{};
+ KClientSession* session;
R_TRY(port->CreateSession(std::addressof(session)));
- kernel.RegisterNamedServiceHandler(port_name, &port->GetParent()->GetServerPort());
-
// Register the session in the table, close the extra reference.
handle_table.Register(*out, session);
session->Close();
// We succeeded.
- handle_guard.Cancel();
- return ResultSuccess;
+ R_SUCCEED();
}
Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
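
The hunk above swaps the cancelable SCOPE_GUARD for ON_RESULT_FAILURE, but the underlying reserve/register pattern is unchanged: a handle-table slot is reserved (and *out written) before the session exists, published on success, and released only on failure. A minimal sketch of that flow with explicit control flow in place of the R_* macros; KHandleTable and KClientPort are the expected yuzu types, but their interfaces here are assumed only from the calls visible in this hunk:

    // Sketch of the reserve/register/unreserve-on-failure pattern from
    // ConnectToNamedPort, with plain control flow instead of R_TRY and
    // ON_RESULT_FAILURE. Error handling is simplified for illustration.
    Result ConnectSketch(KHandleTable& handle_table, KClientPort& port, Handle* out) {
        // Reserve the slot first; *out already holds its final handle value here.
        const Result reserve_result = handle_table.Reserve(out);
        if (reserve_result.IsError()) {
            return reserve_result;
        }

        KClientSession* session{};
        const Result create_result = port.CreateSession(std::addressof(session));
        if (create_result.IsError()) {
            handle_table.Unreserve(*out); // roll back only on the failure path
            return create_result;
        }

        handle_table.Register(*out, session); // publish the handle
        session->Close();                     // drop the extra creation reference
        return ResultSuccess;
    }

In the macro-based version, ON_RESULT_FAILURE expresses the same roll-back declaratively: the unreserve body runs only when a later result check in the same scope fails.
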
@@ -78,8 +62,11 @@ Result ConnectToPort(Core::System& system, Handle* out_handle, Handle port) {
Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t user_name,
int32_t max_sessions) {
// Copy the provided name from user memory to kernel memory.
+ auto string_name = system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);
+
+ // Copy the provided name from user memory to kernel memory.
std::array<char, KObjectName::NameLengthMax> name{};
- system.Memory().ReadBlock(user_name, name.data(), sizeof(name));
+ std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);
// Validate that sessions and name are valid.
R_UNLESS(max_sessions >= 0, ResultOutOfRange);
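
Both ConnectToNamedPort and ManageNamedPort now share the same name-handling sequence: read a bounded C string out of guest memory, copy it into a fixed-size, zero-filled KObjectName::NameLengthMax buffer, and require that the buffer stays NUL-terminated. A condensed sketch of that sequence, mirroring the hunks above (the surrounding function scaffolding is omitted):

    // Condensed name-handling sketch shared by both port SVCs above.
    // ReadCString stops at the first NUL or after NameLengthMax characters.
    const auto string_name =
        system.Memory().ReadCString(user_name, KObjectName::NameLengthMax);

    // Zero-filled buffer; strncpy copies at most NameLengthMax - 1 bytes,
    // so the final byte is left as written by the zero-initialization.
    std::array<char, KObjectName::NameLengthMax> name{};
    std::strncpy(name.data(), string_name.c_str(), KObjectName::NameLengthMax - 1);

    // The name must remain NUL-terminated within the fixed buffer.
    R_UNLESS(name[name.size() - 1] == '\0', ResultOutOfRange);

After the copy, ConnectToNamedPort resolves the port through KObjectName::Find<KClientPort> rather than the removed CreateNamedServicePort path, so named ports are looked up purely through kernel object names.
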
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 1a8f7e191..9e7bf9530 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -48,19 +48,15 @@ Result ResetSignal(Core::System& system, Handle handle) {
return ResultInvalidHandle;
}
-/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, s32 num_handles,
- s64 nano_seconds) {
- LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}",
- handles_address, num_handles, nano_seconds);
-
+static Result WaitSynchronization(Core::System& system, int32_t* out_index, const Handle* handles,
+ int32_t num_handles, int64_t timeout_ns) {
// Ensure number of handles is valid.
- R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange);
+ R_UNLESS(0 <= num_handles && num_handles <= Svc::ArgumentHandleCountMax, ResultOutOfRange);
+ // Get the synchronization context.
auto& kernel = system.Kernel();
+ auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
std::vector<KSynchronizationObject*> objs(num_handles);
- const auto& handle_table = GetCurrentProcess(kernel).GetHandleTable();
- Handle* handles = system.Memory().GetPointer<Handle>(handles_address);
// Copy user handles.
if (num_handles > 0) {
@@ -68,21 +64,38 @@ Result WaitSynchronization(Core::System& system, s32* index, VAddr handles_addre
R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles,
num_handles),
ResultInvalidHandle);
- for (const auto& obj : objs) {
- kernel.RegisterInUseObject(obj);
- }
}
// Ensure handles are closed when we're done.
SCOPE_EXIT({
- for (s32 i = 0; i < num_handles; ++i) {
- kernel.UnregisterInUseObject(objs[i]);
+ for (auto i = 0; i < num_handles; ++i) {
objs[i]->Close();
}
});
- return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()),
- nano_seconds);
+ // Wait on the objects.
+ Result res = KSynchronizationObject::Wait(kernel, out_index, objs.data(),
+ static_cast<s32>(objs.size()), timeout_ns);
+
+ R_SUCCEED_IF(res == ResultSessionClosed);
+ R_RETURN(res);
+}
+
+/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
+Result WaitSynchronization(Core::System& system, int32_t* out_index, VAddr user_handles,
+ int32_t num_handles, int64_t timeout_ns) {
+ LOG_TRACE(Kernel_SVC, "called user_handles={:#x}, num_handles={}, timeout_ns={}", user_handles,
+ num_handles, timeout_ns);
+
+ // Ensure number of handles is valid.
+ R_UNLESS(0 <= num_handles && num_handles <= Svc::ArgumentHandleCountMax, ResultOutOfRange);
+
+ std::vector<Handle> handles(num_handles);
+ if (num_handles > 0) {
+ system.Memory().ReadBlock(user_handles, handles.data(), num_handles * sizeof(Handle));
+ }
+
+ R_RETURN(WaitSynchronization(system, out_index, handles.data(), num_handles, timeout_ns));
}
/// Resumes a thread waiting on WaitSynchronization
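
WaitSynchronization is now split into a thin SVC entry point that validates the handle count and copies the handle array out of guest memory, and an internal overload that operates purely on kernel objects: it resolves each handle through the current process's handle table, waits on the whole set, and reports ResultSessionClosed as success so a session torn down mid-wait does not surface to the caller as an error. The SCOPE_EXIT block is what keeps reference counts balanced on every exit path; a small stand-alone sketch of that idea, with an RAII helper standing in for the macro (the helper type is illustrative and not part of this change):

    #include <span>

    // Illustrative RAII helper: closes every resolved synchronization object
    // when the enclosing scope ends, regardless of how the wait finished.
    struct ScopedObjectCloser {
        std::span<KSynchronizationObject*> objs;
        ~ScopedObjectCloser() {
            for (KSynchronizationObject* obj : objs) {
                obj->Close(); // drop the reference taken by GetMultipleObjects
            }
        }
    };

Inside the internal overload, SCOPE_EXIT plays exactly this role: each object pulled out of the handle table is released once whether the wait succeeds, times out, or fails, which also removes the need for the old RegisterInUseObject/UnregisterInUseObject bookkeeping.
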
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 542c13461..39355d9c4 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -151,6 +151,7 @@ enum class InfoType : u32 {
FreeThreadCount = 24,
ThreadTickCount = 25,
IsSvcPermitted = 26,
+ IoRegionHint = 27,
MesosphereMeta = 65000,
MesosphereCurrentProcess = 65001,