| field | value | date |
|---|---|---|
| author | bunnei <bunneidev@gmail.com> | 2021-05-08 08:30:17 +0200 |
| committer | GitHub <noreply@github.com> | 2021-05-08 08:30:17 +0200 |
| commit | faa067f175cbf5e916ed75776817f0046e6731c4 (patch) | |
| tree | 8ab02a72a6e4d6578848c8da2c02af02684aeec7 /src/core/hle/kernel | |
| parent | Merge pull request #6287 from lioncash/ldr-copy (diff) | |
| parent | hle: kernel: KPageTable: CanContain should not be constexpr. (diff) | |
Diffstat (limited to 'src/core/hle/kernel')
78 files changed, 4134 insertions, 2207 deletions
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp deleted file mode 100644 index 0b6957e31..000000000 --- a/src/core/hle/kernel/client_port.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/hle_ipc.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/server_port.h" -#include "core/hle/kernel/session.h" -#include "core/hle/kernel/svc_results.h" - -namespace Kernel { - -ClientPort::ClientPort(KernelCore& kernel) : Object{kernel} {} -ClientPort::~ClientPort() = default; - -std::shared_ptr<ServerPort> ClientPort::GetServerPort() const { - return server_port; -} - -ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() { - if (active_sessions >= max_sessions) { - return ResultMaxConnectionsReached; - } - active_sessions++; - - auto [client, server] = Kernel::Session::Create(kernel, name); - - if (server_port->HasHLEHandler()) { - server_port->GetHLEHandler()->ClientConnected(std::move(server)); - } else { - server_port->AppendPendingSession(std::move(server)); - } - - return MakeResult(std::move(client)); -} - -void ClientPort::ConnectionClosed() { - if (active_sessions == 0) { - return; - } - - --active_sessions; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h deleted file mode 100644 index 77559ebf9..000000000 --- a/src/core/hle/kernel/client_port.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2016 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <memory> -#include <string> - -#include "common/common_types.h" -#include "core/hle/kernel/object.h" -#include "core/hle/result.h" - -namespace Kernel { - -class ClientSession; -class KernelCore; -class ServerPort; - -class ClientPort final : public Object { -public: - explicit ClientPort(KernelCore& kernel); - ~ClientPort() override; - - friend class ServerPort; - std::string GetTypeName() const override { - return "ClientPort"; - } - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::ClientPort; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - std::shared_ptr<ServerPort> GetServerPort() const; - - /** - * Creates a new Session pair, adds the created ServerSession to the associated ServerPort's - * list of pending sessions, and signals the ServerPort, causing any threads - * waiting on it to awake. - * @returns ClientSession The client endpoint of the created Session pair, or error code. - */ - ResultVal<std::shared_ptr<ClientSession>> Connect(); - - /** - * Signifies that a previously active connection has been closed, - * decreasing the total number of active connections to this port. - */ - void ConnectionClosed(); - - void Finalize() override {} - -private: - std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port. 
- u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have - u32 active_sessions = 0; ///< Number of currently open sessions to this port - std::string name; ///< Name of client port (optional) -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp deleted file mode 100644 index e230f365a..000000000 --- a/src/core/hle/kernel/client_session.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/hle_ipc.h" -#include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/server_session.h" -#include "core/hle/kernel/session.h" -#include "core/hle/kernel/svc_results.h" -#include "core/hle/result.h" - -namespace Kernel { - -ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} - -ClientSession::~ClientSession() { - // This destructor will be called automatically when the last ClientSession handle is closed by - // the emulated application. - if (parent->Server()) { - parent->Server()->ClientDisconnected(); - } -} - -bool ClientSession::IsSignaled() const { - UNIMPLEMENTED(); - return true; -} - -ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kernel, - std::shared_ptr<Session> parent, - std::string name) { - std::shared_ptr<ClientSession> client_session{std::make_shared<ClientSession>(kernel)}; - - client_session->name = std::move(name); - client_session->parent = std::move(parent); - - return MakeResult(std::move(client_session)); -} - -ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread, - Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - // Keep ServerSession alive until we're done working with it. - if (!parent->Server()) { - return ResultSessionClosedByRemote; - } - - // Signal the server session that new data is available - return parent->Server()->HandleSyncRequest(std::move(thread), memory, core_timing); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h deleted file mode 100644 index 85aafeaf4..000000000 --- a/src/core/hle/kernel/client_session.h +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2019 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#pragma once - -#include <memory> -#include <string> - -#include "core/hle/kernel/k_synchronization_object.h" -#include "core/hle/result.h" - -union ResultCode; - -namespace Core::Memory { -class Memory; -} - -namespace Core::Timing { -class CoreTiming; -} - -namespace Kernel { - -class KernelCore; -class Session; -class KThread; - -class ClientSession final : public KSynchronizationObject { -public: - explicit ClientSession(KernelCore& kernel); - ~ClientSession() override; - - friend class Session; - - std::string GetTypeName() const override { - return "ClientSession"; - } - - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::ClientSession; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing); - - bool IsSignaled() const override; - - void Finalize() override {} - -private: - static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel, - std::shared_ptr<Session> parent, - std::string name = "Unknown"); - - /// The parent session, which links to the server endpoint. - std::shared_ptr<Session> parent; - - /// Name of the client session (optional) - std::string name; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp index c6838649f..7c87cbada 100644 --- a/src/core/hle/kernel/global_scheduler_context.cpp +++ b/src/core/hle/kernel/global_scheduler_context.cpp @@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel) GlobalSchedulerContext::~GlobalSchedulerContext() = default; -void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) { +void GlobalSchedulerContext::AddThread(KThread* thread) { std::scoped_lock lock{global_list_guard}; - thread_list.push_back(std::move(thread)); + thread_list.push_back(thread); } -void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) { +void GlobalSchedulerContext::RemoveThread(KThread* thread) { std::scoped_lock lock{global_list_guard}; thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), thread_list.end()); diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h index 11592843e..ba8b67fd1 100644 --- a/src/core/hle/kernel/global_scheduler_context.h +++ b/src/core/hle/kernel/global_scheduler_context.h @@ -38,13 +38,13 @@ public: ~GlobalSchedulerContext(); /// Adds a new thread to the scheduler - void AddThread(std::shared_ptr<KThread> thread); + void AddThread(KThread* thread); /// Removes a thread from the scheduler - void RemoveThread(std::shared_ptr<KThread> thread); + void RemoveThread(KThread* thread); /// Returns a list of all threads managed by the scheduler - [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const { + [[nodiscard]] const std::vector<KThread*>& GetThreadList() const { return thread_list; } @@ -79,7 +79,7 @@ private: LockType scheduler_lock; /// Lists all thread ids that aren't deleted/etc. 
- std::vector<std::shared_ptr<KThread>> thread_list; + std::vector<KThread*> thread_list; Common::SpinLock global_list_guard{}; }; diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp deleted file mode 100644 index f96d34078..000000000 --- a/src/core/hle/kernel/handle_table.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include <utility> -#include "common/assert.h" -#include "common/logging/log.h" -#include "core/core.h" -#include "core/hle/kernel/handle_table.h" -#include "core/hle/kernel/k_scheduler.h" -#include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/svc_results.h" - -namespace Kernel { -namespace { -constexpr u16 GetSlot(Handle handle) { - return static_cast<u16>(handle >> 15); -} - -constexpr u16 GetGeneration(Handle handle) { - return static_cast<u16>(handle & 0x7FFF); -} -} // Anonymous namespace - -HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} { - Clear(); -} - -HandleTable::~HandleTable() = default; - -ResultCode HandleTable::SetSize(s32 handle_table_size) { - if (static_cast<u32>(handle_table_size) > MAX_COUNT) { - LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT); - return ResultOutOfMemory; - } - - // Values less than or equal to zero indicate to use the maximum allowable - // size for the handle table in the actual kernel, so we ignore the given - // value in that case, since we assume this by default unless this function - // is called. - if (handle_table_size > 0) { - table_size = static_cast<u16>(handle_table_size); - } - - return RESULT_SUCCESS; -} - -ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) { - DEBUG_ASSERT(obj != nullptr); - - const u16 slot = next_free_slot; - if (slot >= table_size) { - LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use."); - return ResultHandleTableFull; - } - next_free_slot = generations[slot]; - - const u16 generation = next_generation++; - - // Overflow count so it fits in the 15 bits dedicated to the generation in the handle. - // Horizon OS uses zero to represent an invalid handle, so skip to 1. - if (next_generation >= (1 << 15)) { - next_generation = 1; - } - - generations[slot] = generation; - objects[slot] = std::move(obj); - - Handle handle = generation | (slot << 15); - return MakeResult<Handle>(handle); -} - -ResultVal<Handle> HandleTable::Duplicate(Handle handle) { - std::shared_ptr<Object> object = GetGeneric(handle); - if (object == nullptr) { - LOG_ERROR(Kernel, "Tried to duplicate invalid handle: {:08X}", handle); - return ResultInvalidHandle; - } - return Create(std::move(object)); -} - -ResultCode HandleTable::Close(Handle handle) { - if (!IsValid(handle)) { - LOG_ERROR(Kernel, "Handle is not valid! 
handle={:08X}", handle); - return ResultInvalidHandle; - } - - const u16 slot = GetSlot(handle); - - if (objects[slot].use_count() == 1) { - objects[slot]->Finalize(); - } - - objects[slot] = nullptr; - - generations[slot] = next_free_slot; - next_free_slot = slot; - return RESULT_SUCCESS; -} - -bool HandleTable::IsValid(Handle handle) const { - const std::size_t slot = GetSlot(handle); - const u16 generation = GetGeneration(handle); - - return slot < table_size && objects[slot] != nullptr && generations[slot] == generation; -} - -std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const { - if (handle == CurrentThread) { - return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread()); - } else if (handle == CurrentProcess) { - return SharedFrom(kernel.CurrentProcess()); - } - - if (!IsValid(handle)) { - return nullptr; - } - return objects[GetSlot(handle)]; -} - -void HandleTable::Clear() { - for (u16 i = 0; i < table_size; ++i) { - generations[i] = static_cast<u16>(i + 1); - objects[i] = nullptr; - } - next_free_slot = 0; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h deleted file mode 100644 index c9dab8cdd..000000000 --- a/src/core/hle/kernel/handle_table.h +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <array> -#include <cstddef> -#include <memory> - -#include "common/common_types.h" -#include "core/hle/kernel/object.h" -#include "core/hle/result.h" - -namespace Kernel { - -class KernelCore; - -enum KernelHandle : Handle { - InvalidHandle = 0, - CurrentThread = 0xFFFF8000, - CurrentProcess = 0xFFFF8001, -}; - -/** - * This class allows the creation of Handles, which are references to objects that can be tested - * for validity and looked up. Here they are used to pass references to kernel objects to/from the - * emulated process. it has been designed so that it follows the same handle format and has - * approximately the same restrictions as the handle manager in the CTR-OS. - * - * Handles contain two sub-fields: a slot index (bits 31:15) and a generation value (bits 14:0). - * The slot index is used to index into the arrays in this class to access the data corresponding - * to the Handle. - * - * To prevent accidental use of a freed Handle whose slot has already been reused, a global counter - * is kept and incremented every time a Handle is created. This is the Handle's "generation". The - * value of the counter is stored into the Handle as well as in the handle table (in the - * "generations" array). When looking up a handle, the Handle's generation must match with the - * value stored on the class, otherwise the Handle is considered invalid. - * - * To find free slots when allocating a Handle without needing to scan the entire object array, the - * generations field of unallocated slots is re-purposed as a linked list of indices to free slots. - * When a Handle is created, an index is popped off the list and used for the new Handle. When it - * is destroyed, it is again pushed onto the list to be re-used by the next allocation. It is - * likely that this allocation strategy differs from the one used in CTR-OS, but this hasn't been - * verified and isn't likely to cause any problems. 
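
The removed HandleTable documentation above describes the legacy handle encoding: a slot index in bits 31:15, a generation value in bits 14:0, and zero reserved for the invalid handle. A minimal standalone sketch of that layout (MakeHandle is an illustrative helper; GetSlot and GetGeneration mirror the deleted functions):

```cpp
#include <cstdint>

using Handle = uint32_t;

// Pack a slot index and generation into a handle, per the deleted doc comment:
// bits 31:15 hold the slot, bits 14:0 hold the generation (never zero in practice).
constexpr Handle MakeHandle(uint16_t slot, uint16_t generation) {
    return static_cast<Handle>(generation & 0x7FFF) | (static_cast<Handle>(slot) << 15);
}

constexpr uint16_t GetSlot(Handle handle) {
    return static_cast<uint16_t>(handle >> 15);
}

constexpr uint16_t GetGeneration(Handle handle) {
    return static_cast<uint16_t>(handle & 0x7FFF);
}

static_assert(GetSlot(MakeHandle(3, 42)) == 3);
static_assert(GetGeneration(MakeHandle(3, 42)) == 42);
```
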
- */ -class HandleTable final : NonCopyable { -public: - /// This is the maximum limit of handles allowed per process in Horizon - static constexpr std::size_t MAX_COUNT = 1024; - - explicit HandleTable(KernelCore& kernel); - ~HandleTable(); - - /** - * Sets the number of handles that may be in use at one time - * for this handle table. - * - * @param handle_table_size The desired size to limit the handle table to. - * - * @returns an error code indicating if initialization was successful. - * If initialization was not successful, then ERR_OUT_OF_MEMORY - * will be returned. - * - * @pre handle_table_size must be within the range [0, 1024] - */ - ResultCode SetSize(s32 handle_table_size); - - /** - * Allocates a handle for the given object. - * @return The created Handle or one of the following errors: - * - `ERR_HANDLE_TABLE_FULL`: the maximum number of handles has been exceeded. - */ - ResultVal<Handle> Create(std::shared_ptr<Object> obj); - - /** - * Returns a new handle that points to the same object as the passed in handle. - * @return The duplicated Handle or one of the following errors: - * - `ERR_INVALID_HANDLE`: an invalid handle was passed in. - * - Any errors returned by `Create()`. - */ - ResultVal<Handle> Duplicate(Handle handle); - - /** - * Closes a handle, removing it from the table and decreasing the object's ref-count. - * @return `RESULT_SUCCESS` or one of the following errors: - * - `ERR_INVALID_HANDLE`: an invalid handle was passed in. - */ - ResultCode Close(Handle handle); - - /// Checks if a handle is valid and points to an existing object. - bool IsValid(Handle handle) const; - - /** - * Looks up a handle. - * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid. - */ - std::shared_ptr<Object> GetGeneric(Handle handle) const; - - /** - * Looks up a handle while verifying its type. - * @return Pointer to the looked-up object, or `nullptr` if the handle is not valid or its - * type differs from the requested one. - */ - template <class T> - std::shared_ptr<T> Get(Handle handle) const { - return DynamicObjectCast<T>(GetGeneric(handle)); - } - - /// Closes all handles held in this table. - void Clear(); - -private: - /// Stores the Object referenced by the handle or null if the slot is empty. - std::array<std::shared_ptr<Object>, MAX_COUNT> objects; - - /** - * The value of `next_generation` when the handle was created, used to check for validity. For - * empty slots, contains the index of the next free slot in the list. - */ - std::array<u16, MAX_COUNT> generations; - - /** - * The limited size of the handle table. This can be specified by process - * capabilities in order to restrict the overall number of handles that - * can be created in a process instance - */ - u16 table_size = static_cast<u16>(MAX_COUNT); - - /** - * Global counter of the number of created handles. Stored in `generations` when a handle is - * created, and wraps around to 1 when it hits 0x8000. - */ - u16 next_generation = 1; - - /// Head of the free slots linked list. - u16 next_free_slot = 0; - - /// Underlying kernel instance that this handle table operates under. 
- KernelCore& kernel; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 2b363b1d9..b505d20a6 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -14,17 +14,16 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "core/hle/ipc_helpers.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_handle_table.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_server_session.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/server_session.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" #include "core/memory.h" @@ -35,28 +34,23 @@ SessionRequestHandler::SessionRequestHandler() = default; SessionRequestHandler::~SessionRequestHandler() = default; -void SessionRequestHandler::ClientConnected(std::shared_ptr<ServerSession> server_session) { - server_session->SetHleHandler(shared_from_this()); - connected_sessions.push_back(std::move(server_session)); +void SessionRequestHandler::ClientConnected(KServerSession* session) { + session->SetHleHandler(shared_from_this()); } -void SessionRequestHandler::ClientDisconnected( - const std::shared_ptr<ServerSession>& server_session) { - server_session->SetHleHandler(nullptr); - boost::range::remove_erase(connected_sessions, server_session); +void SessionRequestHandler::ClientDisconnected(KServerSession* session) { + session->SetHleHandler(nullptr); } -HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, - std::shared_ptr<ServerSession> server_session, - std::shared_ptr<KThread> thread) - : server_session(std::move(server_session)), - thread(std::move(thread)), kernel{kernel}, memory{memory} { +HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, + KServerSession* server_session_, KThread* thread_) + : server_session(server_session_), thread(thread_), kernel{kernel_}, memory{memory_} { cmd_buf[0] = 0; } HLERequestContext::~HLERequestContext() = default; -void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, +void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming) { IPC::RequestParser rp(src_cmdbuf); command_header = rp.PopRaw<IPC::CommandHeader>(); @@ -77,12 +71,12 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_copy; ++handle) { const u32 copy_handle{rp.Pop<Handle>()}; copy_handles.push_back(copy_handle); - copy_objects.push_back(handle_table.GetGeneric(copy_handle)); + copy_objects.push_back(handle_table.GetObject(copy_handle).GetPointerUnsafe()); } for (u32 handle = 0; handle < handle_descriptor_header->num_handles_to_move; ++handle) { const u32 move_handle{rp.Pop<Handle>()}; move_handles.push_back(move_handle); - move_objects.push_back(handle_table.GetGeneric(move_handle)); + move_objects.push_back(handle_table.GetObject(move_handle).GetPointerUnsafe()); } } else { // For responses we just ignore the handles, they're empty and will be 
populated when @@ -169,7 +163,7 @@ void HLERequestContext::ParseCommandBuffer(const HandleTable& handle_table, u32_ rp.Skip(1, false); // The command is actually an u64, but we don't use the high part. } -ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, +ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf) { ParseCommandBuffer(handle_table, src_cmdbuf, true); if (command_header->type == IPC::CommandType::Close) { @@ -223,12 +217,12 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) { // for specific values in each of these descriptors. for (auto& object : copy_objects) { ASSERT(object != nullptr); - dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap(); + R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object)); } for (auto& object : move_objects) { ASSERT(object != nullptr); - dst_cmdbuf[current_offset++] = handle_table.Create(object).Unwrap(); + R_TRY(handle_table.Add(&dst_cmdbuf[current_offset++], object)); } } diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index 6fba42615..fa031c121 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -16,7 +16,8 @@ #include "common/concepts.h" #include "common/swap.h" #include "core/hle/ipc.h" -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/svc_common.h" union ResultCode; @@ -35,13 +36,14 @@ class ServiceFrameworkBase; namespace Kernel { class Domain; -class HandleTable; class HLERequestContext; class KernelCore; -class Process; -class ServerSession; +class KHandleTable; +class KProcess; +class KServerSession; class KThread; class KReadableEvent; +class KSession; class KWritableEvent; enum class ThreadWakeupReason; @@ -71,20 +73,14 @@ public: * associated ServerSession alive for the duration of the connection. * @param server_session Owning pointer to the ServerSession associated with the connection. */ - void ClientConnected(std::shared_ptr<ServerSession> server_session); + void ClientConnected(KServerSession* session); /** * Signals that a client has just disconnected from this HLE handler and releases the * associated ServerSession. * @param server_session ServerSession associated with the connection. */ - void ClientDisconnected(const std::shared_ptr<ServerSession>& server_session); - -protected: - /// List of sessions that are connected to this handler. - /// A ServerSession whose server endpoint is an HLE implementation is kept alive by this list - /// for the duration of the connection. - std::vector<std::shared_ptr<ServerSession>> connected_sessions; + void ClientDisconnected(KServerSession* session); }; /** @@ -109,8 +105,7 @@ protected: class HLERequestContext { public: explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, - std::shared_ptr<ServerSession> session, - std::shared_ptr<KThread> thread); + KServerSession* session, KThread* thread); ~HLERequestContext(); /// Returns a pointer to the IPC command buffer for this request. @@ -122,12 +117,12 @@ public: * Returns the session through which this request was made. This can be used as a map key to * access per-client data on services. */ - const std::shared_ptr<Kernel::ServerSession>& Session() const { + Kernel::KServerSession* Session() { return server_session; } /// Populates this context with data from the requesting process/thread. 
- ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, + ResultCode PopulateFromIncomingCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf); /// Writes data from this context back to the requesting process/thread. @@ -218,22 +213,12 @@ public: return move_handles.at(index); } - template <typename T> - std::shared_ptr<T> GetCopyObject(std::size_t index) { - return DynamicObjectCast<T>(copy_objects.at(index)); - } - - template <typename T> - std::shared_ptr<T> GetMoveObject(std::size_t index) { - return DynamicObjectCast<T>(move_objects.at(index)); + void AddMoveObject(KAutoObject* object) { + move_objects.emplace_back(object); } - void AddMoveObject(std::shared_ptr<Object> object) { - move_objects.emplace_back(std::move(object)); - } - - void AddCopyObject(std::shared_ptr<Object> object) { - copy_objects.emplace_back(std::move(object)); + void AddCopyObject(KAutoObject* object) { + copy_objects.emplace_back(object); } void AddDomainObject(std::shared_ptr<SessionRequestHandler> object) { @@ -276,10 +261,6 @@ public: return *thread; } - const KThread& GetThread() const { - return *thread; - } - bool IsThreadWaiting() const { return is_thread_waiting; } @@ -287,16 +268,17 @@ public: private: friend class IPC::ResponseBuilder; - void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming); + void ParseCommandBuffer(const KHandleTable& handle_table, u32_le* src_cmdbuf, bool incoming); std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf; - std::shared_ptr<Kernel::ServerSession> server_session; - std::shared_ptr<KThread> thread; + Kernel::KServerSession* server_session{}; + KThread* thread; + // TODO(yuriks): Check common usage of this and optimize size accordingly boost::container::small_vector<Handle, 8> move_handles; boost::container::small_vector<Handle, 8> copy_handles; - boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects; - boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects; + boost::container::small_vector<KAutoObject*, 8> move_objects; + boost::container::small_vector<KAutoObject*, 8> copy_objects; boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects; std::optional<IPC::CommandHeader> command_header; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp new file mode 100644 index 000000000..69ae405e6 --- /dev/null +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -0,0 +1,192 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/core.h" +#include "core/hardware_properties.h" +#include "core/hle/kernel/init/init_slab_setup.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_memory_layout.h" +#include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_shared_memory.h" +#include "core/hle/kernel/k_system_control.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_transfer_memory.h" +#include "core/hle/kernel/memory_types.h" +#include "core/memory.h" + +namespace Kernel::Init { + +#define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS + +#define FOREACH_SLAB_TYPE(HANDLER, ...) \ + HANDLER(KProcess, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \ + HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ + HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ + HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ + HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ + HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ + HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ + HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) + +namespace { + +#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME, + +enum KSlabType : u32 { + FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) KSlabType_Count, +}; + +#undef DEFINE_SLAB_TYPE_ENUM_MEMBER + +// Constexpr counts. +constexpr size_t SlabCountKProcess = 80; +constexpr size_t SlabCountKThread = 800; +constexpr size_t SlabCountKEvent = 700; +constexpr size_t SlabCountKInterruptEvent = 100; +constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew. 
+constexpr size_t SlabCountKSharedMemory = 80; +constexpr size_t SlabCountKTransferMemory = 200; +constexpr size_t SlabCountKCodeMemory = 10; +constexpr size_t SlabCountKDeviceAddressSpace = 300; +constexpr size_t SlabCountKSession = 933; +constexpr size_t SlabCountKLightSession = 100; +constexpr size_t SlabCountKObjectName = 7; +constexpr size_t SlabCountKResourceLimit = 5; +constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; +constexpr size_t SlabCountKAlpha = 1; +constexpr size_t SlabCountKBeta = 6; + +constexpr size_t SlabCountExtraKThread = 160; + +template <typename T> +VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address, + size_t num_objects) { + const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*)); + VAddr start = Common::AlignUp(address, alignof(T)); + + if (size > 0) { + const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); + ASSERT(region != nullptr); + ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab)); + T::InitializeSlabHeap(system.Kernel(), system.Memory().GetKernelBuffer(start, size), size); + } + + return start + size; +} + +} // namespace + +KSlabResourceCounts KSlabResourceCounts::CreateDefault() { + return { + .num_KProcess = SlabCountKProcess, + .num_KThread = SlabCountKThread, + .num_KEvent = SlabCountKEvent, + .num_KInterruptEvent = SlabCountKInterruptEvent, + .num_KPort = SlabCountKPort, + .num_KSharedMemory = SlabCountKSharedMemory, + .num_KTransferMemory = SlabCountKTransferMemory, + .num_KCodeMemory = SlabCountKCodeMemory, + .num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace, + .num_KSession = SlabCountKSession, + .num_KLightSession = SlabCountKLightSession, + .num_KObjectName = SlabCountKObjectName, + .num_KResourceLimit = SlabCountKResourceLimit, + .num_KDebug = SlabCountKDebug, + .num_KAlpha = SlabCountKAlpha, + .num_KBeta = SlabCountKBeta, + }; +} + +void InitializeSlabResourceCounts(KernelCore& kernel) { + kernel.SlabResourceCounts() = KSlabResourceCounts::CreateDefault(); + if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) { + kernel.SlabResourceCounts().num_KThread += SlabCountExtraKThread; + } +} + +size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { + size_t size = 0; + +#define ADD_SLAB_SIZE(NAME, COUNT, ...) \ + { \ + size += alignof(NAME); \ + size += Common::AlignUp(sizeof(NAME) * (COUNT), alignof(void*)); \ + }; + + // Add the size required for each slab. + FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) + +#undef ADD_SLAB_SIZE + + // Add the reserved size. + size += KernelSlabHeapGapsSize; + + return size; +} + +void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { + auto& kernel = system.Kernel(); + + // Get the start of the slab region, since that's where we'll be working. + VAddr address = memory_layout.GetSlabRegionAddress(); + + // Initialize slab type array to be in sorted order. + std::array<KSlabType, KSlabType_Count> slab_types; + for (size_t i = 0; i < slab_types.size(); i++) { + slab_types[i] = static_cast<KSlabType>(i); + } + + // N shuffles the slab type array with the following simple algorithm. + for (size_t i = 0; i < slab_types.size(); i++) { + const size_t rnd = KSystemControl::GenerateRandomRange(0, slab_types.size() - 1); + std::swap(slab_types[i], slab_types[rnd]); + } + + // Create an array to represent the gaps between the slabs. 
+ const size_t total_gap_size = KernelSlabHeapGapsSize; + std::array<size_t, slab_types.size()> slab_gaps; + for (size_t i = 0; i < slab_gaps.size(); i++) { + // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange + // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we + // will include it ourselves. + slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size); + } + + // Sort the array, so that we can treat differences between values as offsets to the starts of + // slabs. + for (size_t i = 1; i < slab_gaps.size(); i++) { + for (size_t j = i; j > 0 && slab_gaps[j - 1] > slab_gaps[j]; j--) { + std::swap(slab_gaps[j], slab_gaps[j - 1]); + } + } + + for (size_t i = 0; i < slab_types.size(); i++) { + // Add the random gap to the address. + address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + +#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ + case KSlabType_##NAME: \ + address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \ + break; + + // Initialize the slabheap. + switch (slab_types[i]) { + // For each of the slab types, we want to initialize that heap. + FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) + // If we somehow get an invalid type, abort. + default: + UNREACHABLE(); + } + } +} + +} // namespace Kernel::Init diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h new file mode 100644 index 000000000..a8f7e0918 --- /dev/null +++ b/src/core/hle/kernel/init/init_slab_setup.h @@ -0,0 +1,43 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +namespace Core { +class System; +} // namespace Core + +namespace Kernel { +class KernelCore; +class KMemoryLayout; +} // namespace Kernel + +namespace Kernel::Init { + +struct KSlabResourceCounts { + static KSlabResourceCounts CreateDefault(); + + size_t num_KProcess; + size_t num_KThread; + size_t num_KEvent; + size_t num_KInterruptEvent; + size_t num_KPort; + size_t num_KSharedMemory; + size_t num_KTransferMemory; + size_t num_KCodeMemory; + size_t num_KDeviceAddressSpace; + size_t num_KSession; + size_t num_KLightSession; + size_t num_KObjectName; + size_t num_KResourceLimit; + size_t num_KDebug; + size_t num_KAlpha; + size_t num_KBeta; +}; + +void InitializeSlabResourceCounts(KernelCore& kernel); +size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); +void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); + +} // namespace Kernel::Init diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp new file mode 100644 index 000000000..dbe237f09 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object.cpp @@ -0,0 +1,14 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_auto_object.h" + +namespace Kernel { + +KAutoObject* KAutoObject::Create(KAutoObject* obj) { + obj->m_ref_count = 1; + return obj; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h new file mode 100644 index 000000000..765e46670 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object.h @@ -0,0 +1,306 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
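
The InitializeSlabHeaps routine above places a random gap in front of each slab by drawing values in [0, total_gap_size], sorting them, and using the differences between consecutive values as offsets. A standalone sketch of that placement scheme (std::mt19937_64 stands in for KSystemControl::GenerateRandomRange; names and sizes are illustrative):

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <random>

int main() {
    constexpr std::size_t num_slabs = 8;
    constexpr std::size_t total_gap_size = 0x2000;

    std::mt19937_64 rng{0x1234};
    std::uniform_int_distribution<std::size_t> dist(0, total_gap_size); // inclusive, like GenerateRandomRange

    std::array<std::size_t, num_slabs> gaps{};
    for (auto& gap : gaps) {
        gap = dist(rng);
    }
    std::sort(gaps.begin(), gaps.end());

    std::size_t address = 0; // would be memory_layout.GetSlabRegionAddress()
    for (std::size_t i = 0; i < num_slabs; ++i) {
        // The difference between consecutive sorted values is the gap before slab i.
        address += (i == 0) ? gaps[0] : gaps[i] - gaps[i - 1];
        // ... initialize slab i at `address` and advance past its aligned size ...
    }
    return 0;
}
```
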
+ +#pragma once + +#include <atomic> +#include <string> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" +#include "core/hle/kernel/k_class_token.h" + +namespace Kernel { + +class KernelCore; +class KProcess; + +#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ + YUZU_NON_COPYABLE(CLASS); \ + YUZU_NON_MOVEABLE(CLASS); \ + \ +private: \ + friend class ::Kernel::KClassTokenGenerator; \ + static constexpr inline auto ObjectType = ::Kernel::KClassTokenGenerator::ObjectType::CLASS; \ + static constexpr inline const char* const TypeName = #CLASS; \ + static constexpr inline ClassTokenType ClassToken() { \ + return ::Kernel::ClassToken<CLASS>; \ + } \ + \ +public: \ + using BaseClass = BASE_CLASS; \ + static constexpr TypeObj GetStaticTypeObj() { \ + constexpr ClassTokenType Token = ClassToken(); \ + return TypeObj(TypeName, Token); \ + } \ + static constexpr const char* GetStaticTypeName() { \ + return TypeName; \ + } \ + virtual TypeObj GetTypeObj() const { \ + return GetStaticTypeObj(); \ + } \ + virtual const char* GetTypeName() const { \ + return GetStaticTypeName(); \ + } \ + \ +private: \ + constexpr bool operator!=(const TypeObj& rhs) + +class KAutoObject { +protected: + class TypeObj { + public: + constexpr explicit TypeObj(const char* n, ClassTokenType tok) + : m_name(n), m_class_token(tok) {} + + constexpr const char* GetName() const { + return m_name; + } + constexpr ClassTokenType GetClassToken() const { + return m_class_token; + } + + constexpr bool operator==(const TypeObj& rhs) const { + return this->GetClassToken() == rhs.GetClassToken(); + } + + constexpr bool operator!=(const TypeObj& rhs) const { + return this->GetClassToken() != rhs.GetClassToken(); + } + + constexpr bool IsDerivedFrom(const TypeObj& rhs) const { + return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken(); + } + + private: + const char* m_name; + ClassTokenType m_class_token; + }; + +private: + KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject); + +public: + explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {} + virtual ~KAutoObject() = default; + + static KAutoObject* Create(KAutoObject* ptr); + + // Destroy is responsible for destroying the auto object's resources when ref_count hits zero. + virtual void Destroy() { + UNIMPLEMENTED(); + } + + // Finalize is responsible for cleaning up resource, but does not destroy the object. + virtual void Finalize() {} + + virtual KProcess* GetOwner() const { + return nullptr; + } + + u32 GetReferenceCount() const { + return m_ref_count.load(); + } + + bool IsDerivedFrom(const TypeObj& rhs) const { + return this->GetTypeObj().IsDerivedFrom(rhs); + } + + bool IsDerivedFrom(const KAutoObject& rhs) const { + return this->IsDerivedFrom(rhs.GetTypeObj()); + } + + template <typename Derived> + Derived DynamicCast() { + static_assert(std::is_pointer_v<Derived>); + using DerivedType = std::remove_pointer_t<Derived>; + + if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) { + return static_cast<Derived>(this); + } else { + return nullptr; + } + } + + template <typename Derived> + const Derived DynamicCast() const { + static_assert(std::is_pointer_v<Derived>); + using DerivedType = std::remove_pointer_t<Derived>; + + if (this->IsDerivedFrom(DerivedType::GetStaticTypeObj())) { + return static_cast<Derived>(this); + } else { + return nullptr; + } + } + + bool Open() { + // Atomically increment the reference count, only if it's positive. 
+ u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); + do { + if (cur_ref_count == 0) { + return false; + } + ASSERT(cur_ref_count < cur_ref_count + 1); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, + std::memory_order_relaxed)); + + return true; + } + + void Close() { + // Atomically decrement the reference count, not allowing it to become negative. + u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire); + do { + ASSERT(cur_ref_count > 0); + } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, + std::memory_order_relaxed)); + + // If ref count hits zero, destroy the object. + if (cur_ref_count - 1 == 0) { + this->Destroy(); + } + } + +protected: + KernelCore& kernel; + std::string name; + +private: + std::atomic<u32> m_ref_count{}; +}; + +class KAutoObjectWithListContainer; + +class KAutoObjectWithList : public KAutoObject { +public: + explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {} + + static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) { + const u64 lid = lhs.GetId(); + const u64 rid = rhs.GetId(); + + if (lid < rid) { + return -1; + } else if (lid > rid) { + return 1; + } else { + return 0; + } + } + +public: + virtual u64 GetId() const { + return reinterpret_cast<u64>(this); + } + + virtual const std::string& GetName() const { + return name; + } + +private: + friend class KAutoObjectWithListContainer; + +private: + Common::IntrusiveRedBlackTreeNode list_node; + +protected: + KernelCore& kernel; +}; + +template <typename T> +class KScopedAutoObject { + YUZU_NON_COPYABLE(KScopedAutoObject); + +public: + constexpr KScopedAutoObject() = default; + + constexpr KScopedAutoObject(T* o) : m_obj(o) { + if (m_obj != nullptr) { + m_obj->Open(); + } + } + + ~KScopedAutoObject() { + if (m_obj != nullptr) { + m_obj->Close(); + } + m_obj = nullptr; + } + + template <typename U> + requires(std::derived_from<T, U> || + std::derived_from<U, T>) constexpr KScopedAutoObject(KScopedAutoObject<U>&& rhs) { + if constexpr (std::derived_from<U, T>) { + // Upcast. + m_obj = rhs.m_obj; + rhs.m_obj = nullptr; + } else { + // Downcast. 
+ T* derived = nullptr; + if (rhs.m_obj != nullptr) { + derived = rhs.m_obj->template DynamicCast<T*>(); + if (derived == nullptr) { + rhs.m_obj->Close(); + } + } + + m_obj = derived; + rhs.m_obj = nullptr; + } + } + + constexpr KScopedAutoObject<T>& operator=(KScopedAutoObject<T>&& rhs) { + rhs.Swap(*this); + return *this; + } + + constexpr T* operator->() { + return m_obj; + } + constexpr T& operator*() { + return *m_obj; + } + + constexpr void Reset(T* o) { + KScopedAutoObject(o).Swap(*this); + } + + constexpr T* GetPointerUnsafe() { + return m_obj; + } + + constexpr T* GetPointerUnsafe() const { + return m_obj; + } + + constexpr T* ReleasePointerUnsafe() { + T* ret = m_obj; + m_obj = nullptr; + return ret; + } + + constexpr bool IsNull() const { + return m_obj == nullptr; + } + constexpr bool IsNotNull() const { + return m_obj != nullptr; + } + +private: + template <typename U> + friend class KScopedAutoObject; + +private: + T* m_obj{}; + +private: + constexpr void Swap(KScopedAutoObject& rhs) noexcept { + std::swap(m_obj, rhs.m_obj); + } +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object_container.cpp b/src/core/hle/kernel/k_auto_object_container.cpp new file mode 100644 index 000000000..fc0c28874 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object_container.cpp @@ -0,0 +1,28 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_auto_object_container.h" + +namespace Kernel { + +void KAutoObjectWithListContainer::Register(KAutoObjectWithList* obj) { + KScopedLightLock lk(m_lock); + + m_object_list.insert(*obj); +} + +void KAutoObjectWithListContainer::Unregister(KAutoObjectWithList* obj) { + KScopedLightLock lk(m_lock); + + m_object_list.erase(m_object_list.iterator_to(*obj)); +} + +size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess* owner) { + KScopedLightLock lk(m_lock); + + return std::count_if(m_object_list.begin(), m_object_list.end(), + [&](const auto& obj) { return obj.GetOwner() == owner; }); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h new file mode 100644 index 000000000..ff40cf5a7 --- /dev/null +++ b/src/core/hle/kernel/k_auto_object_container.h @@ -0,0 +1,70 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
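
The new KAutoObject base class above replaces std::shared_ptr ownership with intrusive reference counting: Open() only increments the count while it is positive, Close() destroys the object once the count reaches zero, and KScopedAutoObject pairs the two calls. A simplified standalone sketch of that pattern (plain std types; heap allocation assumed):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

class RefCounted {
public:
    // Increment the count, but only if the object is still alive (count > 0).
    bool Open() {
        uint32_t cur = m_ref_count.load(std::memory_order_acquire);
        do {
            if (cur == 0) {
                return false;
            }
        } while (!m_ref_count.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
        return true;
    }

    // Decrement the count and destroy the object when it reaches zero.
    void Close() {
        uint32_t cur = m_ref_count.load(std::memory_order_acquire);
        do {
            assert(cur > 0);
        } while (!m_ref_count.compare_exchange_weak(cur, cur - 1, std::memory_order_relaxed));
        if (cur - 1 == 0) {
            delete this; // the kernel version calls Destroy() and frees into a slab heap
        }
    }

protected:
    virtual ~RefCounted() = default;

private:
    std::atomic<uint32_t> m_ref_count{1}; // KAutoObject::Create() starts objects at 1
};

// Scoped holder analogous to KScopedAutoObject: opens on construction, closes on destruction.
class ScopedRef {
public:
    explicit ScopedRef(RefCounted* obj) : m_obj(obj) {
        if (m_obj != nullptr) {
            m_obj->Open();
        }
    }
    ~ScopedRef() {
        if (m_obj != nullptr) {
            m_obj->Close();
        }
    }

private:
    RefCounted* m_obj{};
};
```
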
+ +#pragma once + +#include <atomic> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_light_lock.h" + +namespace Kernel { + +class KernelCore; +class KProcess; + +class KAutoObjectWithListContainer { + YUZU_NON_COPYABLE(KAutoObjectWithListContainer); + YUZU_NON_MOVEABLE(KAutoObjectWithListContainer); + +public: + using ListType = Common::IntrusiveRedBlackTreeMemberTraits< + &KAutoObjectWithList::list_node>::TreeType<KAutoObjectWithList>; + +public: + class ListAccessor : public KScopedLightLock { + public: + explicit ListAccessor(KAutoObjectWithListContainer* container) + : KScopedLightLock(container->m_lock), m_list(container->m_object_list) {} + explicit ListAccessor(KAutoObjectWithListContainer& container) + : KScopedLightLock(container.m_lock), m_list(container.m_object_list) {} + + typename ListType::iterator begin() const { + return m_list.begin(); + } + + typename ListType::iterator end() const { + return m_list.end(); + } + + typename ListType::iterator find(typename ListType::const_reference ref) const { + return m_list.find(ref); + } + + private: + ListType& m_list; + }; + + friend class ListAccessor; + +public: + KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {} + + void Initialize() {} + void Finalize() {} + + void Register(KAutoObjectWithList* obj); + void Unregister(KAutoObjectWithList* obj); + size_t GetOwnedCount(KProcess* owner); + +private: + KLightLock m_lock; + ListType m_object_list; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp new file mode 100644 index 000000000..beb8a2a05 --- /dev/null +++ b/src/core/hle/kernel/k_class_token.cpp @@ -0,0 +1,133 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_class_token.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_readable_event.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/k_server_port.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_shared_memory.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_transfer_memory.h" +#include "core/hle/kernel/k_writable_event.h" + +namespace Kernel { + +// Ensure that we generate correct class tokens for all types. + +// Ensure that the absolute token values are correct. 
+static_assert(ClassToken<KAutoObject> == 0b00000000'00000000); +static_assert(ClassToken<KSynchronizationObject> == 0b00000000'00000001); +static_assert(ClassToken<KReadableEvent> == 0b00000000'00000011); +// static_assert(ClassToken<KInterruptEvent> == 0b00000111'00000011); +// static_assert(ClassToken<KDebug> == 0b00001011'00000001); +static_assert(ClassToken<KThread> == 0b00010011'00000001); +static_assert(ClassToken<KServerPort> == 0b00100011'00000001); +static_assert(ClassToken<KServerSession> == 0b01000011'00000001); +static_assert(ClassToken<KClientPort> == 0b10000011'00000001); +static_assert(ClassToken<KClientSession> == 0b00001101'00000000); +static_assert(ClassToken<KProcess> == 0b00010101'00000001); +static_assert(ClassToken<KResourceLimit> == 0b00100101'00000000); +// static_assert(ClassToken<KLightSession> == 0b01000101'00000000); +static_assert(ClassToken<KPort> == 0b10000101'00000000); +static_assert(ClassToken<KSession> == 0b00011001'00000000); +static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000); +static_assert(ClassToken<KEvent> == 0b01001001'00000000); +static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000); +// static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000); +// static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000); +static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); +// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); +// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); +// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); + +// Ensure that the token hierarchy is correct. + +// Base classes +static_assert(ClassToken<KAutoObject> == (0b00000000)); +static_assert(ClassToken<KSynchronizationObject> == (0b00000001 | ClassToken<KAutoObject>)); +static_assert(ClassToken<KReadableEvent> == (0b00000010 | ClassToken<KSynchronizationObject>)); + +// Final classes +// static_assert(ClassToken<KInterruptEvent> == ((0b00000111 << 8) | ClassToken<KReadableEvent>)); +// static_assert(ClassToken<KDebug> == ((0b00001011 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KThread> == ((0b00010011 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KServerPort> == ((0b00100011 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KServerSession> == + ((0b01000011 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KClientPort> == ((0b10000011 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KClientSession> == ((0b00001101 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KProcess> == ((0b00010101 << 8) | ClassToken<KSynchronizationObject>)); +static_assert(ClassToken<KResourceLimit> == ((0b00100101 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KLightSession> == ((0b01000101 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | 
ClassToken<KAutoObject>)); +static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); +// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); + +// Ensure that the token hierarchy reflects the class hierarchy. + +// Base classes. +static_assert(!std::is_final<KSynchronizationObject>::value && + std::is_base_of<KAutoObject, KSynchronizationObject>::value); +static_assert(!std::is_final<KReadableEvent>::value && + std::is_base_of<KSynchronizationObject, KReadableEvent>::value); + +// Final classes +// static_assert(std::is_final<KInterruptEvent>::value && +// std::is_base_of<KReadableEvent, KInterruptEvent>::value); +// static_assert(std::is_final<KDebug>::value && +// std::is_base_of<KSynchronizationObject, KDebug>::value); +static_assert(std::is_final<KThread>::value && + std::is_base_of<KSynchronizationObject, KThread>::value); +static_assert(std::is_final<KServerPort>::value && + std::is_base_of<KSynchronizationObject, KServerPort>::value); +static_assert(std::is_final<KServerSession>::value && + std::is_base_of<KSynchronizationObject, KServerSession>::value); +static_assert(std::is_final<KClientPort>::value && + std::is_base_of<KSynchronizationObject, KClientPort>::value); +static_assert(std::is_final<KClientSession>::value && + std::is_base_of<KAutoObject, KClientSession>::value); +static_assert(std::is_final<KProcess>::value && + std::is_base_of<KSynchronizationObject, KProcess>::value); +static_assert(std::is_final<KResourceLimit>::value && + std::is_base_of<KAutoObject, KResourceLimit>::value); +// static_assert(std::is_final<KLightSession>::value && +// std::is_base_of<KAutoObject, KLightSession>::value); +static_assert(std::is_final<KPort>::value && std::is_base_of<KAutoObject, KPort>::value); +static_assert(std::is_final<KSession>::value && std::is_base_of<KAutoObject, KSession>::value); +static_assert(std::is_final<KSharedMemory>::value && + std::is_base_of<KAutoObject, KSharedMemory>::value); +static_assert(std::is_final<KEvent>::value && std::is_base_of<KAutoObject, KEvent>::value); +static_assert(std::is_final<KWritableEvent>::value && + std::is_base_of<KAutoObject, KWritableEvent>::value); +// static_assert(std::is_final<KLightClientSession>::value && +// std::is_base_of<KAutoObject, KLightClientSession>::value); +// static_assert(std::is_final<KLightServerSession>::value && +// std::is_base_of<KAutoObject, KLightServerSession>::value); +static_assert(std::is_final<KTransferMemory>::value && + std::is_base_of<KAutoObject, KTransferMemory>::value); +// static_assert(std::is_final<KDeviceAddressSpace>::value && +// std::is_base_of<KAutoObject, KDeviceAddressSpace>::value); +// static_assert(std::is_final<KSessionRequest>::value && +// std::is_base_of<KAutoObject, KSessionRequest>::value); +// static_assert(std::is_final<KCodeMemory>::value && +// std::is_base_of<KAutoObject, KCodeMemory>::value); + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h new file mode 100644 index 000000000..c28db49ec --- /dev/null +++ b/src/core/hle/kernel/k_class_token.h @@ -0,0 +1,131 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
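
The static_asserts above encode the class hierarchy into 16-bit tokens: each base class owns one low bit, each final class gets a three-bit permutation in the high byte, and a token always contains every bit of its bases. A standalone sketch of the resulting IsDerivedFrom test, using token values taken from those asserts:

```cpp
#include <cstdint>

using ClassTokenType = uint16_t;

// Values copied from the static_asserts in k_class_token.cpp above.
constexpr ClassTokenType TokenKAutoObject = 0b00000000'00000000;
constexpr ClassTokenType TokenKSynchronizationObject = 0b00000000'00000001;
constexpr ClassTokenType TokenKThread = 0b00010011'00000001;
constexpr ClassTokenType TokenKSession = 0b00011001'00000000;

// A type derives from another when its token contains all of the base token's bits.
constexpr bool IsDerivedFrom(ClassTokenType derived, ClassTokenType base) {
    return (derived | base) == derived;
}

static_assert(IsDerivedFrom(TokenKThread, TokenKSynchronizationObject));
static_assert(IsDerivedFrom(TokenKThread, TokenKAutoObject));
static_assert(!IsDerivedFrom(TokenKSession, TokenKSynchronizationObject));
```
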
+ +#pragma once + +#include <atomic> + +#include "common/assert.h" +#include "common/bit_util.h" +#include "common/common_types.h" + +namespace Kernel { + +class KAutoObject; + +class KClassTokenGenerator { +public: + using TokenBaseType = u16; + +public: + static constexpr size_t BaseClassBits = 8; + static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits; + // One bit per base class. + static constexpr size_t NumBaseClasses = BaseClassBits; + // Final classes are permutations of three bits. + static constexpr size_t NumFinalClasses = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + index++; + } + } + } + return index; + }(); + +private: + template <TokenBaseType Index> + static constexpr inline TokenBaseType BaseClassToken = 1U << Index; + + template <TokenBaseType Index> + static constexpr inline TokenBaseType FinalClassToken = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + if ((index++) == Index) { + return static_cast<TokenBaseType>(((1ULL << i) | (1ULL << j) | (1ULL << k)) + << BaseClassBits); + } + } + } + } + }(); + + template <typename T> + static constexpr inline TokenBaseType GetClassToken() { + static_assert(std::is_base_of<KAutoObject, T>::value); + if constexpr (std::is_same<T, KAutoObject>::value) { + static_assert(T::ObjectType == ObjectType::KAutoObject); + return 0; + } else if constexpr (!std::is_final<T>::value) { + static_assert(ObjectType::BaseClassesStart <= T::ObjectType && + T::ObjectType < ObjectType::BaseClassesEnd); + constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - + static_cast<TokenBaseType>(ObjectType::BaseClassesStart); + return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>(); + } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && + T::ObjectType < ObjectType::FinalClassesEnd) { + constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - + static_cast<TokenBaseType>(ObjectType::FinalClassesStart); + return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>(); + } else { + static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type"); + } + }; + +public: + enum class ObjectType { + KAutoObject, + + BaseClassesStart, + + KSynchronizationObject = BaseClassesStart, + KReadableEvent, + + BaseClassesEnd, + + FinalClassesStart = BaseClassesEnd, + + KInterruptEvent = FinalClassesStart, + KDebug, + KThread, + KServerPort, + KServerSession, + KClientPort, + KClientSession, + KProcess, + KResourceLimit, + KLightSession, + KPort, + KSession, + KSharedMemory, + KEvent, + KWritableEvent, + KLightClientSession, + KLightServerSession, + KTransferMemory, + KDeviceAddressSpace, + KSessionRequest, + KCodeMemory, + + // NOTE: True order for these has not been determined yet. 
+ KAlpha, + KBeta, + + FinalClassesEnd = FinalClassesStart + NumFinalClasses, + }; + + template <typename T> + static constexpr inline TokenBaseType ClassToken = GetClassToken<T>(); +}; + +using ClassTokenType = KClassTokenGenerator::TokenBaseType; + +template <typename T> +static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp new file mode 100644 index 000000000..b6f1d713f --- /dev/null +++ b/src/core/hle/kernel/k_client_port.cpp @@ -0,0 +1,125 @@ +// Copyright 2021 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/scope_exit.h" +#include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KClientPort::~KClientPort() = default; + +void KClientPort::Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_) { + // Set member variables. + num_sessions = 0; + peak_sessions = 0; + parent = parent_; + max_sessions = max_sessions_; + name = std::move(name_); +} + +void KClientPort::OnSessionFinalized() { + KScopedSchedulerLock sl{kernel}; + + const auto prev = num_sessions--; + if (prev == max_sessions) { + this->NotifyAvailable(); + } +} + +void KClientPort::OnServerClosed() {} + +bool KClientPort::IsLight() const { + return this->GetParent()->IsLight(); +} + +bool KClientPort::IsServerClosed() const { + return this->GetParent()->IsServerClosed(); +} + +void KClientPort::Destroy() { + // Note with our parent that we're closed. + parent->OnClientClosed(); + + // Close our reference to our parent. + parent->Close(); +} + +bool KClientPort::IsSignaled() const { + return num_sessions < max_sessions; +} + +ResultCode KClientPort::CreateSession(KClientSession** out) { + // Reserve a new session from the resource limit. + KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(), + LimitableResource::Sessions); + R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); + + // Update the session counts. + { + // Atomically increment the number of sessions. + s32 new_sessions; + { + const auto max = max_sessions; + auto cur_sessions = num_sessions.load(std::memory_order_acquire); + do { + R_UNLESS(cur_sessions < max, ResultOutOfSessions); + new_sessions = cur_sessions + 1; + } while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions, + std::memory_order_relaxed)); + } + + // Atomically update the peak session tracking. + { + auto peak = peak_sessions.load(std::memory_order_acquire); + do { + if (peak >= new_sessions) { + break; + } + } while (!peak_sessions.compare_exchange_weak(peak, new_sessions, + std::memory_order_relaxed)); + } + } + + // Create a new session. + KSession* session = KSession::Create(kernel); + if (session == nullptr) { + /* Decrement the session count. */ + const auto prev = num_sessions--; + if (prev == max_sessions) { + this->NotifyAvailable(); + } + + return ResultOutOfResource; + } + + // Initialize the session. + session->Initialize(this, parent->GetName()); + + // Commit the session reservation. 
+ session_reservation.Commit(); + + // Register the session. + KSession::Register(kernel, session); + auto session_guard = SCOPE_GUARD({ + session->GetClientSession().Close(); + session->GetServerSession().Close(); + }); + + // Enqueue the session with our parent. + R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession()))); + + // We succeeded, so set the output. + session_guard.Cancel(); + *out = std::addressof(session->GetClientSession()); + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_client_port.h b/src/core/hle/kernel/k_client_port.h new file mode 100644 index 000000000..ec1d7e12e --- /dev/null +++ b/src/core/hle/kernel/k_client_port.h @@ -0,0 +1,61 @@ +// Copyright 2016 Citra Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <string> + +#include "common/common_types.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KClientSession; +class KernelCore; +class KPort; + +class KClientPort final : public KSynchronizationObject { + KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); + +public: + explicit KClientPort(KernelCore& kernel); + virtual ~KClientPort() override; + + void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_); + void OnSessionFinalized(); + void OnServerClosed(); + + const KPort* GetParent() const { + return parent; + } + + s32 GetNumSessions() const { + return num_sessions; + } + s32 GetPeakSessions() const { + return peak_sessions; + } + s32 GetMaxSessions() const { + return max_sessions; + } + + bool IsLight() const; + bool IsServerClosed() const; + + // Overridden virtual functions. + virtual void Destroy() override; + virtual bool IsSignaled() const override; + + ResultCode CreateSession(KClientSession** out); + +private: + std::atomic<s32> num_sessions{}; + std::atomic<s32> peak_sessions{}; + s32 max_sessions{}; + KPort* parent{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp new file mode 100644 index 000000000..0618dc246 --- /dev/null +++ b/src/core/hle/kernel/k_client_session.cpp @@ -0,0 +1,31 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
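
A quick note on KClientPort::CreateSession above: num_sessions is bumped with a compare-and-swap loop rather than a plain fetch_add so that the limit check and the increment happen as one atomic step; the peak_sessions loop has the same shape but exits early once the stored peak already covers the new count. A standalone sketch of the bounded-increment pattern, assuming nothing beyond <atomic> (names are illustrative, not part of the diff):

    #include <atomic>
    #include <optional>

    // Increment 'counter' only if the result stays at or below 'max'.
    // Mirrors the loop in KClientPort::CreateSession; illustrative only.
    std::optional<int> TryIncrementBounded(std::atomic<int>& counter, int max) {
        int cur = counter.load(std::memory_order_acquire);
        int next;
        do {
            if (cur >= max) {
                return std::nullopt; // Already at the limit; leave the counter untouched.
            }
            next = cur + 1;
            // On failure, compare_exchange_weak reloads 'cur' and the limit is re-checked.
        } while (!counter.compare_exchange_weak(cur, next, std::memory_order_relaxed));
        return next;
    }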
+ +#include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/svc_results.h" +#include "core/hle/result.h" + +namespace Kernel { + +KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} +KClientSession::~KClientSession() = default; + +void KClientSession::Destroy() { + parent->OnClientClosed(); + parent->Close(); +} + +void KClientSession::OnServerClosed() {} + +ResultCode KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, + Core::Timing::CoreTiming& core_timing) { + // Signal the server session that new data is available + return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h new file mode 100644 index 000000000..6476a588b --- /dev/null +++ b/src/core/hle/kernel/k_client_session.h @@ -0,0 +1,61 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <string> + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/result.h" + +union ResultCode; + +namespace Core::Memory { +class Memory; +} + +namespace Core::Timing { +class CoreTiming; +} + +namespace Kernel { + +class KernelCore; +class KSession; +class KThread; + +class KClientSession final + : public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject); + +public: + explicit KClientSession(KernelCore& kernel); + virtual ~KClientSession(); + + void Initialize(KSession* parent_, std::string&& name_) { + // Set member variables. + parent = parent_; + name = std::move(name_); + } + + virtual void Destroy() override; + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + + KSession* GetParent() const { + return parent; + } + + ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, + Core::Timing::CoreTiming& core_timing); + + void OnServerClosed(); + +private: + KSession* parent{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 170d8fa0d..f51cf3e7b 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -7,12 +7,13 @@ #include "core/arm/exclusive_monitor.h" #include "core/core.h" #include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/k_linked_list.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_results.h" #include "core/memory.h" @@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val // Wait for the address. 
{ - std::shared_ptr<KThread> owner_thread; - ASSERT(!owner_thread); + KScopedAutoObject<KThread> owner_thread; + ASSERT(owner_thread.IsNull()); { KScopedSchedulerLock sl(kernel); cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); @@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); // Get the lock owner thread. - owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle); - R_UNLESS(owner_thread, ResultInvalidHandle); + owner_thread = + kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>( + handle); + R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle); // Update the lock. cur_thread->SetAddressKey(addr, value); @@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val cur_thread->SetMutexWaitAddressForDebugging(addr); } } - ASSERT(owner_thread); + ASSERT(owner_thread.IsNotNull()); } // Remove the thread as a waiter from the lock owner. @@ -176,19 +179,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) { KThread* thread_to_close = nullptr; if (can_access) { - if (prev_tag == InvalidHandle) { + if (prev_tag == Svc::InvalidHandle) { // If nobody held the lock previously, we're all good. thread->SetSyncedObject(nullptr, RESULT_SUCCESS); thread->Wakeup(); } else { // Get the previous owner. - auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>( - prev_tag & ~Svc::HandleWaitMask); + KThread* owner_thread = kernel.CurrentProcess() + ->GetHandleTable() + .GetObjectWithoutPseudoHandle<KThread>( + static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) + .ReleasePointerUnsafe(); if (owner_thread) { // Add the thread as a waiter on the owner. owner_thread->AddWaiter(thread); - thread_to_close = owner_thread.get(); + thread_to_close = owner_thread; } else { // The lock was tagged with a thread that doesn't exist. thread->SetSyncedObject(nullptr, ResultInvalidState); @@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { // Prepare for signaling. constexpr int MaxThreads = 16; - // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using - // std::shared_ptr. - std::vector<std::shared_ptr<KThread>> thread_list; + KLinkedList<KThread> thread_list{kernel}; std::array<KThread*, MaxThreads> thread_array; s32 num_to_close{}; @@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { if (num_to_close < MaxThreads) { thread_array[num_to_close++] = thread; } else { - thread_list.push_back(SharedFrom(thread)); + thread_list.push_back(*thread); } } @@ -250,8 +254,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { } // Close threads in the list. - for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { - (*it)->Close(); + for (auto it = thread_list.begin(); it != thread_list.end(); + it = thread_list.erase(kernel, it)) { + (*it).Close(); } } diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index bb2fa4ad5..986355b78 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -3,30 +3,53 @@ // Refer to the license.txt file included. 
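
The KConditionVariable hunks above replace std::shared_ptr<KThread> ownership with KScopedAutoObject<KThread>: a successful handle-table lookup opens a reference on the object, and the scoped wrapper is what closes it again. A hedged sketch of the usage pattern, built only from the calls visible in this diff (the assignment and scope-exit behaviour are inferred from the wrapper's name and how it is used here):

    // Inside a function returning ResultCode, as in WaitForAddress above.
    KScopedAutoObject<KThread> owner =
        kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(handle);
    R_UNLESS(owner.IsNotNull(), ResultInvalidHandle);

    // The reference opened by the lookup is closed when 'owner' leaves scope. When the raw
    // pointer must outlive the scope (as in SignalImpl above), ReleasePointerUnsafe() hands
    // over both the pointer and the obligation to call Close() on it later.
    KThread* raw = owner.ReleasePointerUnsafe();
    // ... use raw ...
    raw->Close();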
#include "core/hle/kernel/k_event.h" -#include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_resource_limit.h" namespace Kernel { -KEvent::KEvent(KernelCore& kernel, std::string&& name) : Object{kernel, std::move(name)} {} +KEvent::KEvent(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, readable_event{kernel}, writable_event{kernel} {} KEvent::~KEvent() = default; -std::shared_ptr<KEvent> KEvent::Create(KernelCore& kernel, std::string&& name) { - return std::make_shared<KEvent>(kernel, std::move(name)); -} +void KEvent::Initialize(std::string&& name_) { + // Increment reference count. + // Because reference count is one on creation, this will result + // in a reference count of two. Thus, when both readable and + // writable events are closed this object will be destroyed. + Open(); -void KEvent::Initialize() { // Create our sub events. - readable_event = std::make_shared<KReadableEvent>(kernel, GetName() + ":Readable"); - writable_event = std::make_shared<KWritableEvent>(kernel, GetName() + ":Writable"); + KAutoObject::Create(std::addressof(readable_event)); + KAutoObject::Create(std::addressof(writable_event)); // Initialize our sub sessions. - readable_event->Initialize(this); - writable_event->Initialize(this); + readable_event.Initialize(this, name_ + ":Readable"); + writable_event.Initialize(this, name_ + ":Writable"); + + // Set our owner process. + owner = kernel.CurrentProcess(); + if (owner) { + owner->Open(); + } // Mark initialized. + name = std::move(name_); initialized = true; } +void KEvent::Finalize() { + KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize(); +} + +void KEvent::PostDestroy(uintptr_t arg) { + // Release the event count resource the owner process holds. 
+ KProcess* owner = reinterpret_cast<KProcess*>(arg); + if (owner) { + owner->GetResourceLimit()->Release(LimitableResource::Events, 1); + owner->Close(); + } +} + } // namespace Kernel diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 2fb887129..4ca869930 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -4,53 +4,54 @@ #pragma once -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/k_readable_event.h" +#include "core/hle/kernel/k_writable_event.h" +#include "core/hle/kernel/slab_helpers.h" namespace Kernel { class KernelCore; class KReadableEvent; class KWritableEvent; +class KProcess; -class KEvent final : public Object { -public: - explicit KEvent(KernelCore& kernel, std::string&& name); - ~KEvent() override; +class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject); - static std::shared_ptr<KEvent> Create(KernelCore& kernel, std::string&& name); +public: + explicit KEvent(KernelCore& kernel); + virtual ~KEvent(); - void Initialize(); + void Initialize(std::string&& name); - void Finalize() override {} + virtual void Finalize() override; - std::string GetTypeName() const override { - return "KEvent"; + virtual bool IsInitialized() const override { + return initialized; } - static constexpr HandleType HANDLE_TYPE = HandleType::Event; - HandleType GetHandleType() const override { - return HANDLE_TYPE; + virtual uintptr_t GetPostDestroyArgument() const override { + return reinterpret_cast<uintptr_t>(owner); } - std::shared_ptr<KReadableEvent>& GetReadableEvent() { - return readable_event; - } + static void PostDestroy(uintptr_t arg); - std::shared_ptr<KWritableEvent>& GetWritableEvent() { - return writable_event; + virtual KProcess* GetOwner() const override { + return owner; } - const std::shared_ptr<KReadableEvent>& GetReadableEvent() const { + KReadableEvent& GetReadableEvent() { return readable_event; } - const std::shared_ptr<KWritableEvent>& GetWritableEvent() const { + KWritableEvent& GetWritableEvent() { return writable_event; } private: - std::shared_ptr<KReadableEvent> readable_event; - std::shared_ptr<KWritableEvent> writable_event; + KReadableEvent readable_event; + KWritableEvent writable_event; + KProcess* owner{}; bool initialized{}; }; diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp new file mode 100644 index 000000000..0378447f6 --- /dev/null +++ b/src/core/hle/kernel/k_handle_table.cpp @@ -0,0 +1,135 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_handle_table.h" + +namespace Kernel { + +KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {} +KHandleTable ::~KHandleTable() = default; + +ResultCode KHandleTable::Finalize() { + // Get the table and clear our record of it. + u16 saved_table_size = 0; + { + KScopedSpinLock lk(m_lock); + + std::swap(m_table_size, saved_table_size); + } + + // Close and free all entries. + for (size_t i = 0; i < saved_table_size; i++) { + if (KAutoObject* obj = m_objects[i]; obj != nullptr) { + obj->Close(); + } + } + + return RESULT_SUCCESS; +} + +bool KHandleTable::Remove(Handle handle) { + // Don't allow removal of a pseudo-handle. + if (Svc::IsPseudoHandle(handle)) { + return false; + } + + // Handles must not have reserved bits set. 
+ const auto handle_pack = HandlePack(handle); + if (handle_pack.reserved != 0) { + return false; + } + + // Find the object and free the entry. + KAutoObject* obj = nullptr; + { + KScopedSpinLock lk(m_lock); + + if (this->IsValidHandle(handle)) { + const auto index = handle_pack.index; + + obj = m_objects[index]; + this->FreeEntry(index); + } else { + return false; + } + } + + // Close the object. + obj->Close(); + return true; +} + +ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { + KScopedSpinLock lk(m_lock); + + // Never exceed our capacity. + R_UNLESS(m_count < m_table_size, ResultOutOfHandles); + + // Allocate entry, set output handle. + { + const auto linear_id = this->AllocateLinearId(); + const auto index = this->AllocateEntry(); + + m_entry_infos[index].info = {.linear_id = linear_id, .type = type}; + m_objects[index] = obj; + + obj->Open(); + + *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); + } + + return RESULT_SUCCESS; +} + +ResultCode KHandleTable::Reserve(Handle* out_handle) { + KScopedSpinLock lk(m_lock); + + // Never exceed our capacity. + R_UNLESS(m_count < m_table_size, ResultOutOfHandles); + + *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); + return RESULT_SUCCESS; +} + +void KHandleTable::Unreserve(Handle handle) { + KScopedSpinLock lk(m_lock); + + // Unpack the handle. + const auto handle_pack = HandlePack(handle); + const auto index = handle_pack.index; + const auto linear_id = handle_pack.linear_id; + const auto reserved = handle_pack.reserved; + ASSERT(reserved == 0); + ASSERT(linear_id != 0); + + if (index < m_table_size) { + // NOTE: This code does not check the linear id. + ASSERT(m_objects[index] == nullptr); + this->FreeEntry(index); + } +} + +void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { + KScopedSpinLock lk(m_lock); + + // Unpack the handle. + const auto handle_pack = HandlePack(handle); + const auto index = handle_pack.index; + const auto linear_id = handle_pack.linear_id; + const auto reserved = handle_pack.reserved; + ASSERT(reserved == 0); + ASSERT(linear_id != 0); + + if (index < m_table_size) { + // Set the entry. + ASSERT(m_objects[index] == nullptr); + + m_entry_infos[index].info = {.linear_id = static_cast<u16>(linear_id), .type = type}; + m_objects[index] = obj; + + obj->Open(); + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h new file mode 100644 index 000000000..ba9dd061d --- /dev/null +++ b/src/core/hle/kernel/k_handle_table.h @@ -0,0 +1,310 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
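
The header that follows packs each handle into 32 bits: a 15-bit table index (bits 0..14), a 15-bit linear id (bits 15..29) drawn from a table-wide counter that wraps from 0x7FFF back to 1 (0 is reserved as invalid), and two reserved bits (30..31) that must be zero. Because a recycled slot almost always receives a different linear id, stale handles fail the GetLinearId() comparison in IsValidHandle. A small worked example of EncodeHandle's layout, with illustrative values:

    // index = 5, linear_id = 3 (illustrative values)
    //   index     -> bits  0..14 : 0x00005
    //   linear_id -> bits 15..29 : 3 << 15 = 0x18000
    //   reserved  -> bits 30..31 : 0
    // EncodeHandle(5, 3) == 0x00018005
    // A later lookup only succeeds while m_entry_infos[5].GetLinearId() == 3 and
    // m_objects[5] is non-null.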
+ +#pragma once + +#include <array> + +#include "common/assert.h" +#include "common/bit_field.h" +#include "common/bit_util.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_spin_lock.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/svc_common.h" +#include "core/hle/kernel/svc_results.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KernelCore; + +class KHandleTable { + YUZU_NON_COPYABLE(KHandleTable); + YUZU_NON_MOVEABLE(KHandleTable); + +public: + static constexpr size_t MaxTableSize = 1024; + +public: + explicit KHandleTable(KernelCore& kernel_); + ~KHandleTable(); + + ResultCode Initialize(s32 size) { + R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); + + // Initialize all fields. + m_max_count = 0; + m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size); + m_next_linear_id = MinLinearId; + m_count = 0; + m_free_head_index = -1; + + // Free all entries. + for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) { + m_objects[i] = nullptr; + m_entry_infos[i].next_free_index = i - 1; + m_free_head_index = i; + } + + return RESULT_SUCCESS; + } + + size_t GetTableSize() const { + return m_table_size; + } + size_t GetCount() const { + return m_count; + } + size_t GetMaxCount() const { + return m_max_count; + } + + ResultCode Finalize(); + bool Remove(Handle handle); + + template <typename T = KAutoObject> + KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { + // Lock and look up in table. + KScopedSpinLock lk(m_lock); + + if constexpr (std::is_same_v<T, KAutoObject>) { + return this->GetObjectImpl(handle); + } else { + if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) { + return obj->DynamicCast<T*>(); + } else { + return nullptr; + } + } + } + + template <typename T = KAutoObject> + KScopedAutoObject<T> GetObject(Handle handle) const { + // Handle pseudo-handles. + if constexpr (std::derived_from<KProcess, T>) { + if (handle == Svc::PseudoHandle::CurrentProcess) { + auto* const cur_process = kernel.CurrentProcess(); + ASSERT(cur_process != nullptr); + return cur_process; + } + } else if constexpr (std::derived_from<KThread, T>) { + if (handle == Svc::PseudoHandle::CurrentThread) { + auto* const cur_thread = GetCurrentThreadPointer(kernel); + ASSERT(cur_thread != nullptr); + return cur_thread; + } + } + + return this->template GetObjectWithoutPseudoHandle<T>(handle); + } + + ResultCode Reserve(Handle* out_handle); + void Unreserve(Handle handle); + + template <typename T> + ResultCode Add(Handle* out_handle, T* obj) { + static_assert(std::is_base_of_v<KAutoObject, T>); + return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken()); + } + + template <typename T> + void Register(Handle handle, T* obj) { + static_assert(std::is_base_of_v<KAutoObject, T>); + return this->Register(handle, obj, obj->GetTypeObj().GetClassToken()); + } + + template <typename T> + bool GetMultipleObjects(T** out, const Handle* handles, size_t num_handles) const { + // Try to convert and open all the handles. + size_t num_opened; + { + // Lock the table. + KScopedSpinLock lk(m_lock); + for (num_opened = 0; num_opened < num_handles; num_opened++) { + // Get the current handle. + const auto cur_handle = handles[num_opened]; + + // Get the object for the current handle. 
+ KAutoObject* cur_object = this->GetObjectImpl(cur_handle); + if (cur_object == nullptr) { + break; + } + + // Cast the current object to the desired type. + T* cur_t = cur_object->DynamicCast<T*>(); + if (cur_t == nullptr) { + break; + } + + // Open a reference to the current object. + cur_t->Open(); + out[num_opened] = cur_t; + } + } + + // If we converted every object, succeed. + if (num_opened == num_handles) { + return true; + } + + // If we didn't convert entry object, close the ones we opened. + for (size_t i = 0; i < num_opened; i++) { + out[i]->Close(); + } + + return false; + } + +private: + ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type); + void Register(Handle handle, KAutoObject* obj, u16 type); + + s32 AllocateEntry() { + ASSERT(m_count < m_table_size); + + const auto index = m_free_head_index; + + m_free_head_index = m_entry_infos[index].GetNextFreeIndex(); + + m_max_count = std::max(m_max_count, ++m_count); + + return index; + } + + void FreeEntry(s32 index) { + ASSERT(m_count > 0); + + m_objects[index] = nullptr; + m_entry_infos[index].next_free_index = m_free_head_index; + + m_free_head_index = index; + + --m_count; + } + + u16 AllocateLinearId() { + const u16 id = m_next_linear_id++; + if (m_next_linear_id > MaxLinearId) { + m_next_linear_id = MinLinearId; + } + return id; + } + + bool IsValidHandle(Handle handle) const { + // Unpack the handle. + const auto handle_pack = HandlePack(handle); + const auto raw_value = handle_pack.raw; + const auto index = handle_pack.index; + const auto linear_id = handle_pack.linear_id; + const auto reserved = handle_pack.reserved; + ASSERT(reserved == 0); + + // Validate our indexing information. + if (raw_value == 0) { + return false; + } + if (linear_id == 0) { + return false; + } + if (index >= m_table_size) { + return false; + } + + // Check that there's an object, and our serial id is correct. + if (m_objects[index] == nullptr) { + return false; + } + if (m_entry_infos[index].GetLinearId() != linear_id) { + return false; + } + + return true; + } + + KAutoObject* GetObjectImpl(Handle handle) const { + // Handles must not have reserved bits set. + const auto handle_pack = HandlePack(handle); + if (handle_pack.reserved != 0) { + return nullptr; + } + + if (this->IsValidHandle(handle)) { + return m_objects[handle_pack.index]; + } else { + return nullptr; + } + } + + KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { + + // Index must be in bounds. + if (index >= m_table_size) { + return nullptr; + } + + // Ensure entry has an object. 
+ if (KAutoObject* obj = m_objects[index]; obj != nullptr) { + *out_handle = EncodeHandle(static_cast<u16>(index), m_entry_infos[index].GetLinearId()); + return obj; + } else { + return nullptr; + } + } + +private: + union HandlePack { + HandlePack() = default; + HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} + + u32 raw; + BitField<0, 15, u32> index; + BitField<15, 15, u32> linear_id; + BitField<30, 2, u32> reserved; + }; + + static constexpr u16 MinLinearId = 1; + static constexpr u16 MaxLinearId = 0x7FFF; + + static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { + HandlePack handle{}; + handle.index.Assign(index); + handle.linear_id.Assign(linear_id); + handle.reserved.Assign(0); + return handle.raw; + } + + union EntryInfo { + struct { + u16 linear_id; + u16 type; + } info; + s32 next_free_index; + + constexpr u16 GetLinearId() const { + return info.linear_id; + } + constexpr u16 GetType() const { + return info.type; + } + constexpr s32 GetNextFreeIndex() const { + return next_free_index; + } + }; + +private: + std::array<EntryInfo, MaxTableSize> m_entry_infos{}; + std::array<KAutoObject*, MaxTableSize> m_objects{}; + s32 m_free_head_index{-1}; + u16 m_table_size{}; + u16 m_max_count{}; + u16 m_next_linear_id{MinLinearId}; + u16 m_count{}; + mutable KSpinLock m_lock; + KernelCore& kernel; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h new file mode 100644 index 000000000..500f44685 --- /dev/null +++ b/src/core/hle/kernel/k_linked_list.h @@ -0,0 +1,238 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <boost/intrusive/list.hpp> + +#include "common/assert.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KernelCore; + +class KLinkedListNode : public boost::intrusive::list_base_hook<>, + public KSlabAllocated<KLinkedListNode> { + +public: + KLinkedListNode() = default; + + void Initialize(void* it) { + m_item = it; + } + + void* GetItem() const { + return m_item; + } + +private: + void* m_item = nullptr; +}; + +template <typename T> +class KLinkedList : private boost::intrusive::list<KLinkedListNode> { +private: + using BaseList = boost::intrusive::list<KLinkedListNode>; + +public: + template <bool Const> + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = Iterator<false>; + using const_iterator = Iterator<true>; + using reverse_iterator = std::reverse_iterator<iterator>; + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + + template <bool Const> + class Iterator { + private: + using BaseIterator = BaseList::iterator; + friend class KLinkedList; + + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename KLinkedList::value_type; + using difference_type = typename KLinkedList::difference_type; + using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>; + using reference = + std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>; + + public: + explicit Iterator(BaseIterator it) : m_base_it(it) {} + + pointer GetItem() const { + return static_cast<pointer>(m_base_it->GetItem()); + } + + bool operator==(const Iterator& 
rhs) const { + return m_base_it == rhs.m_base_it; + } + + bool operator!=(const Iterator& rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return this->GetItem(); + } + + reference operator*() const { + return *this->GetItem(); + } + + Iterator& operator++() { + ++m_base_it; + return *this; + } + + Iterator& operator--() { + --m_base_it; + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + operator Iterator<true>() const { + return Iterator<true>(m_base_it); + } + + private: + BaseIterator m_base_it; + }; + +public: + constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {} + + ~KLinkedList() { + // Erase all elements. + for (auto it = this->begin(); it != this->end(); it = this->erase(kernel, it)) { + } + + // Ensure we succeeded. + ASSERT(this->empty()); + } + + // Iterator accessors. + iterator begin() { + return iterator(BaseList::begin()); + } + + const_iterator begin() const { + return const_iterator(BaseList::begin()); + } + + iterator end() { + return iterator(BaseList::end()); + } + + const_iterator end() const { + return const_iterator(BaseList::end()); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + reverse_iterator rbegin() { + return reverse_iterator(this->end()); + } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(this->end()); + } + + reverse_iterator rend() { + return reverse_iterator(this->begin()); + } + + const_reverse_iterator rend() const { + return const_reverse_iterator(this->begin()); + } + + const_reverse_iterator crbegin() const { + return this->rbegin(); + } + + const_reverse_iterator crend() const { + return this->rend(); + } + + // Content management. 
+ using BaseList::empty; + using BaseList::size; + + reference back() { + return *(--this->end()); + } + + const_reference back() const { + return *(--this->end()); + } + + reference front() { + return *this->begin(); + } + + const_reference front() const { + return *this->begin(); + } + + iterator insert(const_iterator pos, reference ref) { + KLinkedListNode* node = KLinkedListNode::Allocate(kernel); + ASSERT(node != nullptr); + node->Initialize(std::addressof(ref)); + return iterator(BaseList::insert(pos.m_base_it, *node)); + } + + void push_back(reference ref) { + this->insert(this->end(), ref); + } + + void push_front(reference ref) { + this->insert(this->begin(), ref); + } + + void pop_back() { + this->erase(--this->end()); + } + + void pop_front() { + this->erase(this->begin()); + } + + iterator erase(KernelCore& kernel, const iterator pos) { + KLinkedListNode* freed_node = std::addressof(*pos.m_base_it); + iterator ret = iterator(BaseList::erase(pos.m_base_it)); + KLinkedListNode::Free(kernel, freed_node); + + return ret; + } + +private: + KernelCore& kernel; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index c5b9c5e85..a7fdb5fb8 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -134,6 +134,10 @@ enum class KMemoryPermission : u8 { }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); +constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission perm) { + return static_cast<KMemoryPermission>(perm); +} + enum class KMemoryAttribute : u8 { None = 0x00, Mask = 0x7F, diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index d09d5ce48..d4ce98ee3 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -11,11 +11,11 @@ #include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_page_linked_list.h" #include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_system_control.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/svc_results.h" #include "core/memory.h" @@ -420,7 +420,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { remaining_size); if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); - return ResultResourceLimitedExceeded; + return ResultLimitReached; } KPageLinkedList page_linked_list; @@ -578,7 +578,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) { AddRegionToPages(dst_addr, num_pages, dst_pages); if (!dst_pages.IsEqual(src_pages)) { - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } { @@ -641,6 +641,45 @@ ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, K return RESULT_SUCCESS; } +ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) { + VAddr cur_addr{addr}; + + for (const auto& node : page_linked_list.Nodes()) { + const std::size_t num_pages{(addr - cur_addr) / PageSize}; + if (const auto result{ + Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)}; + result.IsError()) { + return result; + } + + cur_addr += node.GetNumPages() * PageSize; + } + + return RESULT_SUCCESS; +} + +ResultCode KPageTable::UnmapPages(VAddr 
addr, KPageLinkedList& page_linked_list, + KMemoryState state) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; + + if (!CanContain(addr, size, state)) { + return ResultInvalidCurrentMemory; + } + + if (IsRegionMapped(addr, num_pages * PageSize)) { + return ResultInvalidCurrentMemory; + } + + CASCADE_CODE(UnmapPages(addr, page_linked_list)); + + block_manager->Update(addr, num_pages, state, KMemoryPermission::None); + + return RESULT_SUCCESS; +} + ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm) { @@ -790,7 +829,7 @@ ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) { if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta); - return ResultResourceLimitedExceeded; + return ResultLimitReached; } KPageLinkedList page_linked_list; @@ -1067,7 +1106,7 @@ constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const { } } -constexpr bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { +bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { const VAddr end{addr + size}; const VAddr last{end - 1}; const VAddr region_start{GetRegionAddress(state)}; diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 49b824379..8c2cc03eb 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -40,6 +40,7 @@ public: ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, KMemoryPermission perm); + ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm); KMemoryInfo QueryInfo(VAddr addr); ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); @@ -63,6 +64,8 @@ public: return page_table_impl; } + bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; + private: enum class OperationType : u32 { Map, @@ -79,6 +82,7 @@ private: ResultCode InitializeMemoryLayout(VAddr start, VAddr end); ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, KMemoryPermission perm); + ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list); void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end); bool IsRegionMapped(VAddr address, u64 size); bool IsRegionContiguous(VAddr addr, u64 size) const; @@ -92,7 +96,6 @@ private: OperationType operation, PAddr map_addr = 0); constexpr VAddr GetRegionAddress(KMemoryState state) const; constexpr std::size_t GetRegionSize(KMemoryState state) const; - constexpr bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; constexpr ResultCode CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, @@ -216,8 +219,6 @@ public: constexpr PAddr GetPhysicalAddr(VAddr addr) { return page_table_impl.backing_addr[addr >> PageBits] + addr; } - -private: constexpr bool Contains(VAddr addr) const { return address_space_start <= addr && addr <= address_space_end - 1; } @@ -225,6 +226,8 @@ private: return address_space_start <= addr && addr < addr + size && addr + size - 1 <= address_space_end - 1; } + +private: constexpr bool IsKernel() const 
{ return is_kernel; } diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp new file mode 100644 index 000000000..734aa2a8c --- /dev/null +++ b/src/core/hle/kernel/k_port.cpp @@ -0,0 +1,68 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +KPort::KPort(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {} + +KPort::~KPort() = default; + +void KPort::Initialize(s32 max_sessions_, bool is_light_, const std::string& name_) { + // Open a new reference count to the initialized port. + Open(); + + // Create and initialize our server/client pair. + KAutoObject::Create(std::addressof(server)); + KAutoObject::Create(std::addressof(client)); + server.Initialize(this, name_ + ":Server"); + client.Initialize(this, max_sessions_, name_ + ":Client"); + + // Set our member variables. + is_light = is_light_; + name = name_; + state = State::Normal; +} + +void KPort::OnClientClosed() { + KScopedSchedulerLock sl{kernel}; + + if (state == State::Normal) { + state = State::ClientClosed; + } +} + +void KPort::OnServerClosed() { + KScopedSchedulerLock sl{kernel}; + + if (state == State::Normal) { + state = State::ServerClosed; + } +} + +bool KPort::IsServerClosed() const { + KScopedSchedulerLock sl{kernel}; + return state == State::ServerClosed; +} + +ResultCode KPort::EnqueueSession(KServerSession* session) { + KScopedSchedulerLock sl{kernel}; + + R_UNLESS(state == State::Normal, ResultPortClosed); + + if (server.HasHLEHandler()) { + server.GetHLEHandler()->ClientConnected(session); + } else { + server.EnqueueSession(session); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h new file mode 100644 index 000000000..f1b2838d8 --- /dev/null +++ b/src/core/hle/kernel/k_port.h @@ -0,0 +1,69 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
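
KPort above ties the server and client halves together and is what EnqueueSession routes through (handing the server session to an HLE handler when one is registered, otherwise queueing it on the server port). Before the header, a rough sketch of how a caller drives the client side; it assumes the port object has already been allocated and created through the kernel's usual object-creation path, which is not shown in this diff, and error handling is abbreviated:

    // Illustrative fragment, not part of the change itself.
    port->Initialize(/*max_sessions=*/64, /*is_light=*/false, "ExampleService");

    KClientSession* session{};
    if (const ResultCode rc = port->GetClientPort().CreateSession(std::addressof(session));
        rc.IsSuccess()) {
        // 'session' holds an open reference to the client endpoint; the matching server
        // endpoint was enqueued on (or handed to) the server side by EnqueueSession.
        session->Close();
    }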
+ +#pragma once + +#include <memory> +#include <string> + +#include "common/common_types.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_server_port.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KServerSession; + +class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject); + +public: + explicit KPort(KernelCore& kernel); + virtual ~KPort(); + + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + + void Initialize(s32 max_sessions_, bool is_light_, const std::string& name_); + void OnClientClosed(); + void OnServerClosed(); + + bool IsLight() const { + return is_light; + } + + bool IsServerClosed() const; + + ResultCode EnqueueSession(KServerSession* session); + + KClientPort& GetClientPort() { + return client; + } + KServerPort& GetServerPort() { + return server; + } + const KClientPort& GetClientPort() const { + return client; + } + const KServerPort& GetServerPort() const { + return server; + } + +private: + enum class State : u8 { + Invalid = 0, + Normal = 1, + ClientClosed = 2, + ServerClosed = 3, + }; + +private: + KServerPort server; + KClientPort client; + State state{State::Invalid}; + bool is_light{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/k_process.cpp index e35deb8e2..174318180 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -17,13 +17,14 @@ #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_slab_heap.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/lock.h" #include "core/memory.h" @@ -37,17 +38,20 @@ namespace { * @param owner_process The parent process for the main thread * @param priority The priority to give the main thread */ -void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) { +void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) { const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); - auto thread_res = - KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0, - owner_process.GetIdealCoreId(), stack_top, &owner_process); - std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap(); + KThread* thread = KThread::Create(system.Kernel()); + ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority, + owner_process.GetIdealCoreId(), &owner_process) + .IsSuccess()); // Register 1 must be a handle to the main thread - const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); + Handle thread_handle{}; + owner_process.GetHandleTable().Add(&thread_handle, thread); + + thread->SetName("main"); thread->GetContext32().cpu_registers[0] = 0; thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; @@ -114,10 +118,10 @@ private: 
std::bitset<num_slot_entries> is_slot_used; }; -std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) { +ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string name, + ProcessType type) { auto& kernel = system.Kernel(); - std::shared_ptr<Process> process = std::make_shared<Process>(system); process->name = std::move(name); process->resource_limit = kernel.GetSystemResourceLimit(); @@ -126,6 +130,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() : kernel.CreateNewUserProcessID(); process->capabilities.InitializeForMetadatalessProcess(); + process->is_initialized = true; std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr))); std::uniform_int_distribution<u64> distribution; @@ -133,14 +138,18 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, [&] { return distribution(rng); }); kernel.AppendNewProcess(process); - return process; + + // Open a reference to the resource limit. + process->resource_limit->Open(); + + return RESULT_SUCCESS; } -std::shared_ptr<KResourceLimit> Process::GetResourceLimit() const { +KResourceLimit* KProcess::GetResourceLimit() const { return resource_limit; } -void Process::IncrementThreadCount() { +void KProcess::IncrementThreadCount() { ASSERT(num_threads >= 0); num_created_threads++; @@ -149,7 +158,7 @@ void Process::IncrementThreadCount() { } } -void Process::DecrementThreadCount() { +void KProcess::DecrementThreadCount() { ASSERT(num_threads > 0); if (const auto count = --num_threads; count == 0) { @@ -157,31 +166,34 @@ void Process::DecrementThreadCount() { } } -u64 Process::GetTotalPhysicalMemoryAvailable() const { +u64 KProcess::GetTotalPhysicalMemoryAvailable() const { const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + main_thread_stack_size}; - ASSERT(capacity == kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application)); + if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); + capacity != pool_size) { + LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); + } if (capacity < memory_usage_capacity) { return capacity; } return memory_usage_capacity; } -u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); } -u64 Process::GetTotalPhysicalMemoryUsed() const { +u64 KProcess::GetTotalPhysicalMemoryUsed() const { return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() + GetSystemResourceSize(); } -u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } -bool Process::ReleaseUserException(KThread* thread) { +bool KProcess::ReleaseUserException(KThread* thread) { KScopedSchedulerLock sl{kernel}; if (exception_thread == thread) { @@ -206,7 +218,7 @@ bool Process::ReleaseUserException(KThread* thread) { } } -void Process::PinCurrentThread() { +void KProcess::PinCurrentThread() { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Get the current thread. 
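
A recurring pattern in these KProcess changes, and in KEvent::Initialize earlier in this diff, is manual reference counting on kernel objects: resource_limit->Open() above, the shared-memory Open()/Close() calls below, and the comment in KEvent::Initialize that objects start with one reference and are destroyed once the count drops to zero. A minimal sketch of that contract, with the count type and destruction hook as illustrative assumptions:

    #include <atomic>

    // Illustrative only: the Open/Close contract used throughout this diff.
    class RefCounted {
    public:
        virtual ~RefCounted() = default;

        void Open() { // Take an additional reference.
            ref_count.fetch_add(1, std::memory_order_relaxed);
        }

        void Close() { // Drop a reference; finalize on the last one.
            if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                Destroy();
            }
        }

    private:
        // Overridden per type in the real code (e.g. KClientPort::Destroy above),
        // which releases parent references and returns the object to its allocator.
        virtual void Destroy() = 0;

        std::atomic<int> ref_count{1}; // Objects start with one reference on creation.
    };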
@@ -221,7 +233,7 @@ void Process::PinCurrentThread() { KScheduler::SetSchedulerUpdateNeeded(kernel); } -void Process::UnpinCurrentThread() { +void KProcess::UnpinCurrentThread() { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); // Get the current thread. @@ -236,15 +248,39 @@ void Process::UnpinCurrentThread() { KScheduler::SetSchedulerUpdateNeeded(kernel); } -void Process::RegisterThread(const KThread* thread) { +ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, + [[maybe_unused]] size_t size) { + // Lock ourselves, to prevent concurrent access. + KScopedLightLock lk(state_lock); + + // TODO(bunnei): Manage KSharedMemoryInfo list here. + + // Open a reference to the shared memory. + shmem->Open(); + + return RESULT_SUCCESS; +} + +void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, + [[maybe_unused]] size_t size) { + // Lock ourselves, to prevent concurrent access. + KScopedLightLock lk(state_lock); + + // TODO(bunnei): Manage KSharedMemoryInfo list here. + + // Close a reference to the shared memory. + shmem->Close(); +} + +void KProcess::RegisterThread(const KThread* thread) { thread_list.push_back(thread); } -void Process::UnregisterThread(const KThread* thread) { +void KProcess::UnregisterThread(const KThread* thread) { thread_list.remove(thread); } -ResultCode Process::Reset() { +ResultCode KProcess::Reset() { // Lock the process and the scheduler. KScopedLightLock lk(state_lock); KScopedSchedulerLock sl{kernel}; @@ -258,8 +294,8 @@ ResultCode Process::Reset() { return RESULT_SUCCESS; } -ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, - std::size_t code_size) { +ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, + std::size_t code_size) { program_id = metadata.GetTitleID(); ideal_core = metadata.GetMainThreadCore(); is_64bit_process = metadata.Is64BitProgram(); @@ -271,7 +307,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", code_size + system_resource_size); - return ResultResourceLimitedExceeded; + return ResultLimitReached; } // Initialize proces address space if (const ResultCode result{ @@ -318,10 +354,10 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, tls_region_address = CreateTLSRegion(); memory_reservation.Commit(); - return handle_table.SetSize(capabilities.GetHandleTableSize()); + return handle_table.Initialize(capabilities.GetHandleTableSize()); } -void Process::Run(s32 main_thread_priority, u64 stack_size) { +void KProcess::Run(s32 main_thread_priority, u64 stack_size) { AllocateMainThreadStack(stack_size); resource_limit->Reserve(LimitableResource::Threads, 1); resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); @@ -331,18 +367,18 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { ChangeStatus(ProcessStatus::Running); - SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); + SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); } -void Process::PrepareForTermination() { +void KProcess::PrepareForTermination() { ChangeStatus(ProcessStatus::Exiting); - const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) { + const auto stop_threads = [this](const std::vector<KThread*>& thread_list) { for (auto& thread : 
thread_list) { if (thread->GetOwnerProcess() != this) continue; - if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread()) + if (thread == kernel.CurrentScheduler()->GetCurrentThread()) continue; // TODO(Subv): When are the other running/ready threads terminated? @@ -353,7 +389,7 @@ void Process::PrepareForTermination() { } }; - stop_threads(system.GlobalSchedulerContext().GetThreadList()); + stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); FreeTLSRegion(tls_region_address); tls_region_address = 0; @@ -366,6 +402,16 @@ void Process::PrepareForTermination() { ChangeStatus(ProcessStatus::Exited); } +void KProcess::Finalize() { + // Release memory to the resource limit. + if (resource_limit != nullptr) { + resource_limit->Close(); + } + + // Perform inherited finalization. + KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize(); +} + /** * Attempts to find a TLS page that contains a free slot for * use by a thread. @@ -379,8 +425,8 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { [](const auto& page) { return page.HasAvailableSlots(); }); } -VAddr Process::CreateTLSRegion() { - KScopedSchedulerLock lock(system.Kernel()); +VAddr KProcess::CreateTLSRegion() { + KScopedSchedulerLock lock(kernel); if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; tls_page_iter != tls_pages.cend()) { return *tls_page_iter->ReserveSlot(); @@ -391,7 +437,7 @@ VAddr Process::CreateTLSRegion() { const VAddr start{page_table->GetKernelMapRegionStart()}; const VAddr size{page_table->GetKernelMapRegionEnd() - start}; - const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; + const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; const VAddr tls_page_addr{page_table ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize, KMemoryState::ThreadLocal, @@ -410,8 +456,8 @@ VAddr Process::CreateTLSRegion() { return *reserve_result; } -void Process::FreeTLSRegion(VAddr tls_address) { - KScopedSchedulerLock lock(system.Kernel()); +void KProcess::FreeTLSRegion(VAddr tls_address) { + KScopedSchedulerLock lock(kernel); const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); auto iter = std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { @@ -425,33 +471,34 @@ void Process::FreeTLSRegion(VAddr tls_address) { iter->ReleaseSlot(tls_address); } -void Process::LoadModule(CodeSet code_set, VAddr base_addr) { +void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { std::lock_guard lock{HLE::g_hle_lock}; const auto ReprotectSegment = [&](const CodeSet::Segment& segment, KMemoryPermission permission) { page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); }; - system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size()); + kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), + code_set.memory.size()); ReprotectSegment(code_set.CodeSegment(), KMemoryPermission::ReadAndExecute); ReprotectSegment(code_set.RODataSegment(), KMemoryPermission::Read); ReprotectSegment(code_set.DataSegment(), KMemoryPermission::ReadAndWrite); } -bool Process::IsSignaled() const { +bool KProcess::IsSignaled() const { ASSERT(kernel.GlobalSchedulerContext().IsLocked()); return is_signaled; } -Process::Process(Core::System& system) - : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<KPageTable>(system)}, - 
handle_table{system.Kernel()}, address_arbiter{system}, condition_var{system}, - state_lock{system.Kernel()}, system{system} {} +KProcess::KProcess(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, + page_table{std::make_unique<KPageTable>(kernel.System())}, handle_table{kernel}, + address_arbiter{kernel.System()}, condition_var{kernel.System()}, state_lock{kernel} {} -Process::~Process() = default; +KProcess::~KProcess() = default; -void Process::ChangeStatus(ProcessStatus new_status) { +void KProcess::ChangeStatus(ProcessStatus new_status) { if (status == new_status) { return; } @@ -461,7 +508,7 @@ void Process::ChangeStatus(ProcessStatus new_status) { NotifyAvailable(); } -ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { +ResultCode KProcess::AllocateMainThreadStack(std::size_t stack_size) { ASSERT(stack_size); // The kernel always ensures that the given stack size is page aligned. diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/k_process.h index 45eefb90e..62ab26b05 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/k_process.h @@ -11,11 +11,13 @@ #include <unordered_map> #include <vector> #include "common/common_types.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_address_arbiter.h" +#include "core/hle/kernel/k_auto_object.h" #include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/process_capability.h" +#include "core/hle/kernel/slab_helpers.h" #include "core/hle/result.h" namespace Core { @@ -60,10 +62,13 @@ enum class ProcessStatus { DebugBreak, }; -class Process final : public KSynchronizationObject { +class KProcess final + : public KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject> { + KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); + public: - explicit Process(Core::System& system); - ~Process() override; + explicit KProcess(KernelCore& kernel); + ~KProcess() override; enum : u64 { /// Lowest allowed process ID for a kernel initial process. @@ -85,20 +90,8 @@ public: static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; - static std::shared_ptr<Process> Create(Core::System& system, std::string name, - ProcessType type); - - std::string GetTypeName() const override { - return "Process"; - } - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::Process; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } + static ResultCode Initialize(KProcess* process, Core::System& system, std::string name, + ProcessType type); /// Gets a reference to the process' page table. KPageTable& PageTable() { @@ -111,12 +104,12 @@ public: } /// Gets a reference to the process' handle table. - HandleTable& GetHandleTable() { + KHandleTable& GetHandleTable() { return handle_table; } /// Gets a const reference to the process' handle table. 
- const HandleTable& GetHandleTable() const { + const KHandleTable& GetHandleTable() const { return handle_table; } @@ -167,7 +160,7 @@ public: } /// Gets the resource limit descriptor for this process - std::shared_ptr<KResourceLimit> GetResourceLimit() const; + KResourceLimit* GetResourceLimit() const; /// Gets the ideal CPU core ID for this process u8 GetIdealCoreId() const { @@ -338,9 +331,19 @@ public: void LoadModule(CodeSet code_set, VAddr base_addr); - bool IsSignaled() const override; + virtual bool IsInitialized() const override { + return is_initialized; + } + + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + + virtual void Finalize(); + + virtual u64 GetId() const override final { + return GetProcessID(); + } - void Finalize() override {} + virtual bool IsSignaled() const override; void PinCurrentThread(); void UnpinCurrentThread(); @@ -349,6 +352,9 @@ public: return state_lock; } + ResultCode AddSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); + void RemoveSharedMemory(KSharedMemory* shmem, VAddr address, size_t size); + /////////////////////////////////////////////////////////////////////////////////////////////// // Thread-local storage management @@ -399,7 +405,7 @@ private: u32 system_resource_size = 0; /// Resource limit descriptor for this process - std::shared_ptr<KResourceLimit> resource_limit; + KResourceLimit* resource_limit{}; /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; @@ -423,7 +429,7 @@ private: u64 total_process_running_time_ticks = 0; /// Per-process handle table for storing created object handles in. - HandleTable handle_table; + KHandleTable handle_table; /// Per-process address arbiter. KAddressArbiter address_arbiter; @@ -454,14 +460,12 @@ private: /// Process total image size std::size_t image_size{}; - /// Name of this process - std::string name; - /// Schedule count of this process s64 schedule_count{}; bool is_signaled{}; bool is_suspended{}; + bool is_initialized{}; std::atomic<s32> num_created_threads{}; std::atomic<u16> num_threads{}; @@ -474,9 +478,6 @@ private: KThread* exception_thread{}; KLightLock state_lock; - - /// System context - Core::System& system; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp index 4b4d34857..8fef4bb00 100644 --- a/src/core/hle/kernel/k_readable_event.cpp +++ b/src/core/hle/kernel/k_readable_event.cpp @@ -2,21 +2,18 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
-#include <algorithm> #include "common/assert.h" -#include "common/common_funcs.h" -#include "common/logging/log.h" +#include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" #include "core/hle/kernel/svc_results.h" namespace Kernel { -KReadableEvent::KReadableEvent(KernelCore& kernel, std::string&& name) - : KSynchronizationObject{kernel, std::move(name)} {} +KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {} + KReadableEvent::~KReadableEvent() = default; bool KReadableEvent::IsSignaled() const { @@ -25,6 +22,12 @@ bool KReadableEvent::IsSignaled() const { return is_signaled; } +void KReadableEvent::Destroy() { + if (parent) { + parent->Close(); + } +} + ResultCode KReadableEvent::Signal() { KScopedSchedulerLock lk{kernel}; diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h index e6f0fd900..1783ef0b8 100644 --- a/src/core/hle/kernel/k_readable_event.h +++ b/src/core/hle/kernel/k_readable_event.h @@ -4,8 +4,9 @@ #pragma once +#include "core/hle/kernel/k_auto_object.h" #include "core/hle/kernel/k_synchronization_object.h" -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/slab_helpers.h" #include "core/hle/result.h" namespace Kernel { @@ -13,31 +14,25 @@ namespace Kernel { class KernelCore; class KEvent; -class KReadableEvent final : public KSynchronizationObject { +class KReadableEvent : public KSynchronizationObject { + KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject); + public: - explicit KReadableEvent(KernelCore& kernel, std::string&& name); + explicit KReadableEvent(KernelCore& kernel); ~KReadableEvent() override; - std::string GetTypeName() const override { - return "KReadableEvent"; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::ReadableEvent; - HandleType GetHandleType() const override { - return HANDLE_TYPE; + void Initialize(KEvent* parent_, std::string&& name_) { + is_signaled = false; + parent = parent_; + name = std::move(name_); } KEvent* GetParent() const { return parent; } - void Initialize(KEvent* parent_) { - is_signaled = false; - parent = parent_; - } - - bool IsSignaled() const override; - void Finalize() override {} + virtual bool IsSignaled() const override; + virtual void Destroy() override; ResultCode Signal(); ResultCode Clear(); diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index d05b34ea3..ad5095bfd 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp @@ -10,10 +10,16 @@ namespace Kernel { constexpr s64 DefaultTimeout = 10000000000; // 10 seconds -KResourceLimit::KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_) - : Object{kernel}, lock{kernel}, cond_var{kernel}, core_timing(core_timing_) {} +KResourceLimit::KResourceLimit(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {} KResourceLimit::~KResourceLimit() = default; +void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) { + core_timing = core_timing_; +} + +void KResourceLimit::Finalize() {} + s64 KResourceLimit::GetLimitValue(LimitableResource which) const { const auto index = static_cast<std::size_t>(which); s64 value{}; @@ -78,7 +84,7 @@ ResultCode KResourceLimit::SetLimitValue(LimitableResource which, s64 value) 
{ } bool KResourceLimit::Reserve(LimitableResource which, s64 value) { - return Reserve(which, value, core_timing.GetGlobalTimeNs().count() + DefaultTimeout); + return Reserve(which, value, core_timing->GetGlobalTimeNs().count() + DefaultTimeout); } bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { @@ -109,7 +115,7 @@ bool KResourceLimit::Reserve(LimitableResource which, s64 value, s64 timeout) { } if (current_hints[index] + value <= limit_values[index] && - (timeout < 0 || core_timing.GetGlobalTimeNs().count() < timeout)) { + (timeout < 0 || core_timing->GetGlobalTimeNs().count() < timeout)) { waiter_count++; cond_var.Wait(&lock, timeout); waiter_count--; diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 4542317d0..66ebf32df 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h @@ -8,7 +8,6 @@ #include "common/common_types.h" #include "core/hle/kernel/k_light_condition_variable.h" #include "core/hle/kernel/k_light_lock.h" -#include "core/hle/kernel/object.h" union ResultCode; @@ -32,10 +31,16 @@ constexpr bool IsValidResourceType(LimitableResource type) { return type < LimitableResource::Count; } -class KResourceLimit final : public Object { +class KResourceLimit final + : public KAutoObjectWithSlabHeapAndContainer<KResourceLimit, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KResourceLimit, KAutoObject); + public: - explicit KResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing_); - ~KResourceLimit(); + explicit KResourceLimit(KernelCore& kernel); + virtual ~KResourceLimit(); + + void Initialize(const Core::Timing::CoreTiming* core_timing_); + virtual void Finalize() override; s64 GetLimitValue(LimitableResource which) const; s64 GetCurrentValue(LimitableResource which) const; @@ -49,19 +54,7 @@ public: void Release(LimitableResource which, s64 value); void Release(LimitableResource which, s64 value, s64 hint); - std::string GetTypeName() const override { - return "KResourceLimit"; - } - std::string GetName() const override { - return GetTypeName(); - } - - static constexpr HandleType HANDLE_TYPE = HandleType::ResourceLimit; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - virtual void Finalize() override {} + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} private: using ResourceArray = std::array<s64, static_cast<std::size_t>(LimitableResource::Count)>; @@ -72,6 +65,6 @@ private: mutable KLightLock lock; s32 waiter_count{}; KLightConditionVariable cond_var; - const Core::Timing::CoreTiming& core_timing; + const Core::Timing::CoreTiming* core_timing{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index d1df97305..0115fe6d1 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -15,12 +15,12 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/time_manager.h" namespace Kernel { @@ -71,7 +71,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { } if (state.should_count_idle) { if (highest_thread != nullptr) { - if (Process* 
process = highest_thread->GetOwnerProcess(); process != nullptr) { + if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { process->SetRunningThread(core_id, highest_thread, state.idle_count); } } else { @@ -104,7 +104,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { if (top_thread != nullptr) { // If the thread has no waiters, we need to check if the process has a thread pinned. if (top_thread->GetNumKernelWaiters() == 0) { - if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { + if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) { if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); pinned != nullptr && pinned != top_thread) { // We prefer our parent's pinned thread if possible. However, we also don't @@ -411,7 +411,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { // Get the current thread and process. KThread& cur_thread = Kernel::GetCurrentThread(kernel); - Process& cur_process = *kernel.CurrentProcess(); + KProcess& cur_process = *kernel.CurrentProcess(); // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { @@ -450,7 +450,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { // Get the current thread and process. KThread& cur_thread = Kernel::GetCurrentThread(kernel); - Process& cur_process = *kernel.CurrentProcess(); + KProcess& cur_process = *kernel.CurrentProcess(); // If the thread's yield count matches, there's nothing for us to do. if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { @@ -538,7 +538,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { // Get the current thread and process. KThread& cur_thread = Kernel::GetCurrentThread(kernel); - Process& cur_process = *kernel.CurrentProcess(); + KProcess& cur_process = *kernel.CurrentProcess(); // If the thread's yield count matches, there's nothing for us to do. 
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { @@ -617,7 +617,12 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core state.highest_priority_thread = nullptr; } -KScheduler::~KScheduler() = default; +KScheduler::~KScheduler() { + if (idle_thread) { + idle_thread->Close(); + idle_thread = nullptr; + } +} KThread* KScheduler::GetCurrentThread() const { if (auto result = current_thread.load(); result) { @@ -719,7 +724,7 @@ void KScheduler::ScheduleImpl() { current_thread.store(next_thread); - Process* const previous_process = system.Kernel().CurrentProcess(); + KProcess* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -775,7 +780,7 @@ void KScheduler::SwitchToCurrent() { } } -void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) { +void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) { const u64 prev_switch_ticks = last_context_switch_time; const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; @@ -792,13 +797,9 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) } void KScheduler::Initialize() { - std::string name = "Idle Thread Id:" + std::to_string(core_id); - std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc(); - void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - auto thread_res = KThread::CreateThread( - system, ThreadType::Main, name, 0, KThread::IdleThreadPriority, 0, - static_cast<u32>(core_id), 0, nullptr, std::move(init_func), init_func_parameter); - idle_thread = thread_res.Unwrap().get(); + idle_thread = KThread::Create(system.Kernel()); + ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess()); + idle_thread->SetName(fmt::format("IdleThread:{}", core_id)); } KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 8e32865aa..b789a64a4 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -24,7 +24,7 @@ class System; namespace Kernel { class KernelCore; -class Process; +class KProcess; class SchedulerLock; class KThread; @@ -165,7 +165,7 @@ private: * most recent tick count retrieved. No special arithmetic is * applied to it. 
*/ - void UpdateLastContextSwitchTime(KThread* thread, Process* process); + void UpdateLastContextSwitchTime(KThread* thread, KProcess* process); static void OnSwitch(void* this_scheduler); void SwitchToCurrent(); @@ -173,12 +173,12 @@ private: KThread* prev_thread{}; std::atomic<KThread*> current_thread{}; - KThread* idle_thread; + KThread* idle_thread{}; std::shared_ptr<Common::Fiber> switch_fiber{}; struct SchedulingState { - std::atomic<bool> needs_scheduling; + std::atomic<bool> needs_scheduling{}; bool interrupt_task_thread_runnable{}; bool should_count_idle{}; u64 idle_count{}; diff --git a/src/core/hle/kernel/k_scoped_resource_reservation.h b/src/core/hle/kernel/k_scoped_resource_reservation.h index c5deca00b..07272075d 100644 --- a/src/core/hle/kernel/k_scoped_resource_reservation.h +++ b/src/core/hle/kernel/k_scoped_resource_reservation.h @@ -8,15 +8,14 @@ #pragma once #include "common/common_types.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" -#include "core/hle/kernel/process.h" namespace Kernel { class KScopedResourceReservation { public: - explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, - s64 v, s64 timeout) + explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v, s64 timeout) : resource_limit(std::move(l)), value(v), resource(r) { if (resource_limit && value) { success = resource_limit->Reserve(resource, value, timeout); @@ -25,8 +24,7 @@ public: } } - explicit KScopedResourceReservation(std::shared_ptr<KResourceLimit> l, LimitableResource r, - s64 v = 1) + explicit KScopedResourceReservation(KResourceLimit* l, LimitableResource r, s64 v = 1) : resource_limit(std::move(l)), value(v), resource(r) { if (resource_limit && value) { success = resource_limit->Reserve(resource, value); @@ -35,10 +33,10 @@ public: } } - explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v, s64 t) + explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v, s64 t) : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) {} - explicit KScopedResourceReservation(const Process* p, LimitableResource r, s64 v = 1) + explicit KScopedResourceReservation(const KProcess* p, LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) {} ~KScopedResourceReservation() noexcept { @@ -58,7 +56,7 @@ public: } private: - std::shared_ptr<KResourceLimit> resource_limit; + KResourceLimit* resource_limit{}; s64 value; LimitableResource resource; bool success; diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index ebecf0c77..b5d405744 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -8,7 +8,7 @@ #pragma once #include "common/common_types.h" -#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp new file mode 100644 index 000000000..5e44c48e2 --- /dev/null +++ b/src/core/hle/kernel/k_server_port.cpp @@ -0,0 +1,104 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include <tuple> +#include "common/assert.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_port.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_server_port.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KServerPort::~KServerPort() = default; + +void KServerPort::Initialize(KPort* parent_, std::string&& name_) { + // Set member variables. + parent = parent_; + name = std::move(name_); +} + +bool KServerPort::IsLight() const { + return this->GetParent()->IsLight(); +} + +void KServerPort::CleanupSessions() { + // Ensure our preconditions are met. + if (this->IsLight()) { + UNIMPLEMENTED(); + } + + // Cleanup the session list. + while (true) { + // Get the last session in the list + KServerSession* session = nullptr; + { + KScopedSchedulerLock sl{kernel}; + if (!session_list.empty()) { + session = std::addressof(session_list.front()); + session_list.pop_front(); + } + } + + // Close the session. + if (session != nullptr) { + session->Close(); + } else { + break; + } + } +} + +void KServerPort::Destroy() { + // Note with our parent that we're closed. + parent->OnServerClosed(); + + // Perform necessary cleanup of our session lists. + this->CleanupSessions(); + + // Close our reference to our parent. + parent->Close(); +} + +bool KServerPort::IsSignaled() const { + if (this->IsLight()) { + UNIMPLEMENTED(); + return false; + } else { + return !session_list.empty(); + } +} + +void KServerPort::EnqueueSession(KServerSession* session) { + ASSERT(!this->IsLight()); + + KScopedSchedulerLock sl{kernel}; + + // Add the session to our queue. + session_list.push_back(*session); + if (session_list.size() == 1) { + this->NotifyAvailable(); + } +} + +KServerSession* KServerPort::AcceptSession() { + ASSERT(!this->IsLight()); + + KScopedSchedulerLock sl{kernel}; + + // Return the first session in the list. + if (session_list.empty()) { + return nullptr; + } + + KServerSession* session = std::addressof(session_list.front()); + session_list.pop_front(); + return session; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h new file mode 100644 index 000000000..558c8ed4d --- /dev/null +++ b/src/core/hle/kernel/k_server_port.h @@ -0,0 +1,80 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <string> +#include <utility> +#include <vector> + +#include <boost/intrusive/list.hpp> + +#include "common/common_types.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_synchronization_object.h" +#include "core/hle/result.h" + +namespace Kernel { + +class KernelCore; +class KPort; +class SessionRequestHandler; + +class KServerPort final : public KSynchronizationObject { + KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject); + +private: + using SessionList = boost::intrusive::list<KServerSession>; + +public: + explicit KServerPort(KernelCore& kernel); + virtual ~KServerPort() override; + + using HLEHandler = std::shared_ptr<SessionRequestHandler>; + + void Initialize(KPort* parent_, std::string&& name_); + + /// Whether or not this server port has an HLE handler available. 
+ bool HasHLEHandler() const { + return hle_handler != nullptr; + } + + /// Gets the HLE handler for this port. + HLEHandler GetHLEHandler() const { + return hle_handler; + } + + /** + * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port + * will inherit a reference to this handler. + */ + void SetHleHandler(HLEHandler hle_handler_) { + hle_handler = std::move(hle_handler_); + } + + void EnqueueSession(KServerSession* pending_session); + + KServerSession* AcceptSession(); + + const KPort* GetParent() const { + return parent; + } + + bool IsLight() const; + + // Overridden virtual functions. + virtual void Destroy() override; + virtual bool IsSignaled() const override; + +private: + void CleanupSessions(); + +private: + SessionList session_list; + HLEHandler hle_handler; + KPort* parent{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 790dbb998..c8acaa453 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -10,49 +10,39 @@ #include "common/logging/log.h" #include "core/core_timing.h" #include "core/hle/ipc_helpers.h" -#include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_handle_table.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/server_session.h" -#include "core/hle/kernel/session.h" #include "core/memory.h" namespace Kernel { -ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} +KServerSession::KServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {} -ServerSession::~ServerSession() { +KServerSession::~KServerSession() { kernel.ReleaseServiceThread(service_thread); } -ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, - std::shared_ptr<Session> parent, - std::string name) { - std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; - - session->name = std::move(name); - session->parent = std::move(parent); - session->service_thread = kernel.CreateServiceThread(session->name); - - return MakeResult(std::move(session)); +void KServerSession::Initialize(KSession* parent_, std::string&& name_) { + // Set member variables. + parent = parent_; + name = std::move(name_); + service_thread = kernel.CreateServiceThread(name); } -bool ServerSession::IsSignaled() const { - // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. - if (!parent->Client()) { - return true; - } +void KServerSession::Destroy() { + parent->OnServerClosed(); - // Wait if we have no pending requests, or if we're currently handling a request. - return !pending_requesting_threads.empty() && currently_handling == nullptr; + parent->Close(); } -void ServerSession::ClientDisconnected() { +void KServerSession::OnClientClosed() { // We keep a shared pointer to the hle handler to keep it alive throughout // the call to ClientDisconnected, as ClientDisconnected invalidates the // hle_handler member itself during the course of the function executing. 
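// Editorial note, not part of the commit: the hunks above and below replace
// shared_ptr-owned Session/ServerSession objects with intrusively reference-counted
// kernel objects that are retained with Open() and released with Close(), with the
// last Close() driving Destroy(). The standalone sketch below only illustrates that
// general lifetime pattern; the names RefCounted/Session and the Destroy() hook are
// illustrative assumptions, not yuzu's actual KAutoObject interface.
//
// #include <atomic>
// #include <cstdio>
//
// class RefCounted {
// public:
//     virtual ~RefCounted() = default;
//
//     // Take an additional reference (analogous in spirit to KAutoObject::Open).
//     void Open() {
//         ref_count.fetch_add(1, std::memory_order_relaxed);
//     }
//
//     // Drop a reference; the last Close destroys the object.
//     void Close() {
//         if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
//             Destroy();
//         }
//     }
//
// protected:
//     // Hook for type-specific cleanup before deletion.
//     virtual void Destroy() {
//         delete this;
//     }
//
// private:
//     std::atomic<int> ref_count{1}; // created holding one reference
// };
//
// class Session final : public RefCounted {
// protected:
//     void Destroy() override {
//         std::printf("session destroyed\n");
//         RefCounted::Destroy();
//     }
// };
//
// int main() {
//     Session* session = new Session(); // reference count == 1
//     session->Open();                  // a second owner (e.g. the server endpoint) retains it
//     session->Close();                 // first owner releases: object stays alive
//     session->Close();                 // last owner releases: Destroy() runs
//     return 0;
// }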
@@ -60,24 +50,31 @@ void ServerSession::ClientDisconnected() { if (handler) { // Note that after this returns, this server session's hle_handler is // invalidated (set to null). - handler->ClientDisconnected(SharedFrom(this)); + handler->ClientDisconnected(this); + } +} + +bool KServerSession::IsSignaled() const { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // If the client is closed, we're always signaled. + if (parent->IsClientClosed()) { + return true; } - // Clean up the list of client threads with pending requests, they are unneeded now that the - // client endpoint is closed. - pending_requesting_threads.clear(); - currently_handling = nullptr; + // Otherwise, we're signaled if we have a request and aren't handling one. + return false; } -void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { +void KServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) { domain_request_handlers.push_back(std::move(handler)); } -std::size_t ServerSession::NumDomainRequestHandlers() const { +std::size_t KServerSession::NumDomainRequestHandlers() const { return domain_request_handlers.size(); } -ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { +ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { if (!context.HasDomainMessageHeader()) { return RESULT_SUCCESS; } @@ -116,23 +113,21 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con return RESULT_SUCCESS; } -ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread, - Core::Memory::Memory& memory) { +ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; - auto context = - std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread)); + auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); if (auto strong_ptr = service_thread.lock()) { - strong_ptr->QueueSyncRequest(*this, std::move(context)); + strong_ptr->QueueSyncRequest(*parent, std::move(context)); return RESULT_SUCCESS; } return RESULT_SUCCESS; } -ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { +ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) { ResultCode result = RESULT_SUCCESS; // If the session has been converted to a domain, handle the domain request if (IsDomain() && context.HasDomainMessageHeader()) { @@ -161,10 +156,9 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) { return result; } -ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread, - Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - return QueueSyncRequest(std::move(thread), memory); +ResultCode KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, + Core::Timing::CoreTiming& core_timing) { + return QueueSyncRequest(thread, memory); } } // namespace Kernel diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/k_server_session.h index c42d5ee59..77095bb85 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/k_server_session.h @@ -9,6 +9,8 @@ #include <utility> #include <vector> +#include <boost/intrusive/list.hpp> + #include "common/threadsafe_queue.h" #include 
"core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/service_thread.h" @@ -27,55 +29,35 @@ namespace Kernel { class HLERequestContext; class KernelCore; -class Session; +class KSession; class SessionRequestHandler; class KThread; -/** - * Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS - * primitive for communication between different processes, and are used to implement service calls - * to the various system services. - * - * To make a service call, the client must write the command header and parameters to the buffer - * located at offset 0x80 of the TLS (Thread-Local Storage) area, then execute a SendSyncRequest - * SVC call with its ClientSession handle. The kernel will read the command header, using it to - * marshall the parameters to the process at the server endpoint of the session. - * After the server replies to the request, the response is marshalled back to the caller's - * TLS buffer and control is transferred back to it. - */ -class ServerSession final : public KSynchronizationObject { +class KServerSession final : public KSynchronizationObject, + public boost::intrusive::list_base_hook<> { + KERNEL_AUTOOBJECT_TRAITS(KServerSession, KSynchronizationObject); + friend class ServiceThread; public: - explicit ServerSession(KernelCore& kernel); - ~ServerSession() override; + explicit KServerSession(KernelCore& kernel); + virtual ~KServerSession() override; - friend class Session; + virtual void Destroy() override; - static ResultVal<std::shared_ptr<ServerSession>> Create(KernelCore& kernel, - std::shared_ptr<Session> parent, - std::string name = "Unknown"); + void Initialize(KSession* parent_, std::string&& name_); - std::string GetTypeName() const override { - return "ServerSession"; + KSession* GetParent() { + return parent; } - std::string GetName() const override { - return name; + const KSession* GetParent() const { + return parent; } - static constexpr HandleType HANDLE_TYPE = HandleType::ServerSession; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } + virtual bool IsSignaled() const override; - Session* GetParent() { - return parent.get(); - } - - const Session* GetParent() const { - return parent.get(); - } + void OnClientClosed(); /** * Sets the HLE handler for the session. This handler will be called to service IPC requests @@ -95,12 +77,9 @@ public: * * @returns ResultCode from the operation. */ - ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory, + ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, Core::Timing::CoreTiming& core_timing); - /// Called when a client disconnection occurs. - void ClientDisconnected(); - /// Adds a new domain request handler to the collection of request handlers within /// this ServerSession instance. void AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler); @@ -124,13 +103,9 @@ public: convert_to_domain = true; } - bool IsSignaled() const override; - - void Finalize() override {} - private: /// Queues a sync request from the emulated application. - ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory); + ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory); /// Completes a sync request from the emulated application. ResultCode CompleteSyncRequest(HLERequestContext& context); @@ -139,33 +114,20 @@ private: /// object handle. 
ResultCode HandleDomainSyncRequest(Kernel::HLERequestContext& context); - /// The parent session, which links to the client endpoint. - std::shared_ptr<Session> parent; - /// This session's HLE request handler (applicable when not a domain) std::shared_ptr<SessionRequestHandler> hle_handler; /// This is the list of domain request handlers (after conversion to a domain) std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers; - /// List of threads that are pending a response after a sync request. This list is processed in - /// a LIFO manner, thus, the last request will be dispatched first. - /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test. - std::vector<std::shared_ptr<KThread>> pending_requesting_threads; - - /// Thread whose request is currently being handled. A request is considered "handled" when a - /// response is sent via svcReplyAndReceive. - /// TODO(Subv): Find a better name for this. - std::shared_ptr<KThread> currently_handling; - /// When set to True, converts the session to a domain at the end of the command bool convert_to_domain{}; - /// The name of this session (optional) - std::string name; - /// Thread to dispatch service requests std::weak_ptr<ServiceThread> service_thread; + + /// KSession that owns this KServerSession + KSession* parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp new file mode 100644 index 000000000..7b0bc177d --- /dev/null +++ b/src/core/hle/kernel/k_session.cpp @@ -0,0 +1,85 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/k_session.h" + +namespace Kernel { + +KSession::KSession(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel}, server{kernel}, client{kernel} {} +KSession::~KSession() = default; + +void KSession::Initialize(KClientPort* port_, const std::string& name_) { + // Increment reference count. + // Because reference count is one on creation, this will result + // in a reference count of two. Thus, when both server and client are closed + // this object will be destroyed. + Open(); + + // Create our sub sessions. + KAutoObject::Create(std::addressof(server)); + KAutoObject::Create(std::addressof(client)); + + // Initialize our sub sessions. + server.Initialize(this, name_ + ":Server"); + client.Initialize(this, name_ + ":Client"); + + // Set state and name. + SetState(State::Normal); + name = name_; + + // Set our owner process. + process = kernel.CurrentProcess(); + process->Open(); + + // Set our port. + port = port_; + if (port != nullptr) { + port->Open(); + } + + // Mark initialized. + initialized = true; +} + +void KSession::Finalize() { + if (port == nullptr) { + return; + } + + port->OnSessionFinalized(); + port->Close(); +} + +void KSession::OnServerClosed() { + if (GetState() != State::Normal) { + return; + } + + SetState(State::ServerClosed); + client.OnServerClosed(); +} + +void KSession::OnClientClosed() { + if (GetState() != State::Normal) { + return; + } + + SetState(State::ClientClosed); + server.OnClientClosed(); +} + +void KSession::PostDestroy(uintptr_t arg) { + // Release the session count resource the owner process holds. 
+ KProcess* owner = reinterpret_cast<KProcess*>(arg); + owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1); + owner->Close(); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h new file mode 100644 index 000000000..4321b7885 --- /dev/null +++ b/src/core/hle/kernel/k_session.h @@ -0,0 +1,96 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <atomic> +#include <string> + +#include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_server_session.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject); + +public: + explicit KSession(KernelCore& kernel); + virtual ~KSession() override; + + void Initialize(KClientPort* port_, const std::string& name_); + + virtual void Finalize() override; + + virtual bool IsInitialized() const override { + return initialized; + } + + virtual uintptr_t GetPostDestroyArgument() const override { + return reinterpret_cast<uintptr_t>(process); + } + + static void PostDestroy(uintptr_t arg); + + void OnServerClosed(); + + void OnClientClosed(); + + bool IsServerClosed() const { + return this->GetState() != State::Normal; + } + + bool IsClientClosed() const { + return this->GetState() != State::Normal; + } + + KClientSession& GetClientSession() { + return client; + } + + KServerSession& GetServerSession() { + return server; + } + + const KClientSession& GetClientSession() const { + return client; + } + + const KServerSession& GetServerSession() const { + return server; + } + + const KClientPort* GetParent() const { + return port; + } + +private: + enum class State : u8 { + Invalid = 0, + Normal = 1, + ClientClosed = 2, + ServerClosed = 3, + }; + +private: + void SetState(State state) { + atomic_state = static_cast<u8>(state); + } + + State GetState() const { + return static_cast<State>(atomic_state.load(std::memory_order_relaxed)); + } + +private: + KServerSession server; + KClientSession client; + std::atomic<std::underlying_type_t<State>> atomic_state{ + static_cast<std::underlying_type_t<State>>(State::Invalid)}; + KClientPort* port{}; + KProcess* process{}; + bool initialized{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 9b14f42b5..1da57a4c3 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -8,50 +8,74 @@ #include "core/hle/kernel/k_scoped_resource_reservation.h" #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/svc_results.h" namespace Kernel { -KSharedMemory::KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory) - : Object{kernel}, device_memory{device_memory} {} +KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} KSharedMemory::~KSharedMemory() { kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); } -std::shared_ptr<KSharedMemory> KSharedMemory::Create( - KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, - KPageLinkedList&& page_list, KMemoryPermission owner_permission, - KMemoryPermission user_permission, PAddr physical_address, std::size_t size, std::string name) { +ResultCode 
KSharedMemory::Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_, + KProcess* owner_process_, KPageLinkedList&& page_list_, + Svc::MemoryPermission owner_permission_, + Svc::MemoryPermission user_permission_, + PAddr physical_address_, std::size_t size_, + std::string name_) { + // Set members. + owner_process = owner_process_; + device_memory = &device_memory_; + page_list = std::move(page_list_); + owner_permission = owner_permission_; + user_permission = user_permission_; + physical_address = physical_address_; + size = size_; + name = name_; - const auto resource_limit = kernel.GetSystemResourceLimit(); - KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, - size); - ASSERT(memory_reservation.Succeeded()); + // Get the resource limit. + KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); - std::shared_ptr<KSharedMemory> shared_memory{ - std::make_shared<KSharedMemory>(kernel, device_memory)}; - - shared_memory->owner_process = owner_process; - shared_memory->page_list = std::move(page_list); - shared_memory->owner_permission = owner_permission; - shared_memory->user_permission = user_permission; - shared_memory->physical_address = physical_address; - shared_memory->size = size; - shared_memory->name = name; + // Reserve memory for ourselves. + KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory, + size_); + R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); + // Commit our reservation. memory_reservation.Commit(); - return shared_memory; + + // Set our resource limit. + resource_limit = reslimit; + resource_limit->Open(); + + // Mark initialized. + is_initialized = true; + + // Clear all pages in the memory. + std::memset(device_memory_.GetPointer(physical_address_), 0, size_); + + return RESULT_SUCCESS; +} + +void KSharedMemory::Finalize() { + // Release the memory reservation. + resource_limit->Release(LimitableResource::PhysicalMemory, size); + resource_limit->Close(); + + // Perform inherited finalization. + KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList>::Finalize(); } -ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_t size, - KMemoryPermission permissions) { +ResultCode KSharedMemory::Map(KProcess& target_process, VAddr address, std::size_t size, + Svc::MemoryPermission permissions) { const u64 page_count{(size + PageSize - 1) / PageSize}; if (page_list.GetNumPages() != page_count) { UNIMPLEMENTED_MSG("Page count does not match"); } - const KMemoryPermission expected = + const Svc::MemoryPermission expected = &target_process == owner_process ? 
owner_permission : user_permission; if (permissions != expected) { @@ -59,7 +83,17 @@ ResultCode KSharedMemory::Map(Process& target_process, VAddr address, std::size_ } return target_process.PageTable().MapPages(address, page_list, KMemoryState::Shared, - permissions); + ConvertToKMemoryPermission(permissions)); +} + +ResultCode KSharedMemory::Unmap(KProcess& target_process, VAddr address, std::size_t size) { + const u64 page_count{(size + PageSize - 1) / PageSize}; + + if (page_list.GetNumPages() != page_count) { + UNIMPLEMENTED_MSG("Page count does not match"); + } + + return target_process.PageTable().UnmapPages(address, page_list, KMemoryState::Shared); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 016e34be5..28939c93c 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h @@ -11,37 +11,27 @@ #include "core/device_memory.h" #include "core/hle/kernel/k_memory_block.h" #include "core/hle/kernel/k_page_linked_list.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/slab_helpers.h" #include "core/hle/result.h" namespace Kernel { class KernelCore; -class KSharedMemory final : public Object { +class KSharedMemory final + : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); + public: - explicit KSharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory); + explicit KSharedMemory(KernelCore& kernel); ~KSharedMemory() override; - static std::shared_ptr<KSharedMemory> Create( - KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, - KPageLinkedList&& page_list, KMemoryPermission owner_permission, - KMemoryPermission user_permission, PAddr physical_address, std::size_t size, - std::string name); - - std::string GetTypeName() const override { - return "SharedMemory"; - } - - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::SharedMemory; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } + ResultCode Initialize(KernelCore& kernel_, Core::DeviceMemory& device_memory_, + KProcess* owner_process_, KPageLinkedList&& page_list_, + Svc::MemoryPermission owner_permission_, + Svc::MemoryPermission user_permission_, PAddr physical_address_, + std::size_t size_, std::string name_); /** * Maps a shared memory block to an address in the target process' address space @@ -50,8 +40,16 @@ public: * @param size Size of the shared memory block to map * @param permissions Memory block map permissions (specified by SVC field) */ - ResultCode Map(Process& target_process, VAddr address, std::size_t size, - KMemoryPermission permissions); + ResultCode Map(KProcess& target_process, VAddr address, std::size_t size, + Svc::MemoryPermission permissions); + + /** + * Unmaps a shared memory block from an address in the target process' address space + * @param target_process Process on which to unmap the memory block + * @param address Address in system memory to unmap shared memory block + * @param size Size of the shared memory block to unmap + */ + ResultCode Unmap(KProcess& target_process, VAddr address, std::size_t size); /** * Gets a pointer to the shared memory block @@ -59,7 +57,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ u8* GetPointer(std::size_t offset = 0) { - 
return device_memory.GetPointer(physical_address + offset); + return device_memory->GetPointer(physical_address + offset); } /** @@ -68,20 +66,26 @@ public: * @return A pointer to the shared memory block from the specified offset */ const u8* GetPointer(std::size_t offset = 0) const { - return device_memory.GetPointer(physical_address + offset); + return device_memory->GetPointer(physical_address + offset); } - void Finalize() override {} + virtual void Finalize() override; + + virtual bool IsInitialized() const override { + return is_initialized; + } + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} private: - Core::DeviceMemory& device_memory; - Process* owner_process{}; + Core::DeviceMemory* device_memory; + KProcess* owner_process{}; KPageLinkedList page_list; - KMemoryPermission owner_permission{}; - KMemoryPermission user_permission{}; + Svc::MemoryPermission owner_permission{}; + Svc::MemoryPermission user_permission{}; PAddr physical_address{}; std::size_t size{}; - std::string name; + KResourceLimit* resource_limit{}; + bool is_initialized{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index aa4471d2f..5ce9a1d7c 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h @@ -97,6 +97,7 @@ public: void FreeImpl(void* obj) { // Don't allow freeing an object that wasn't allocated from this heap ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); + impl.Free(obj); } @@ -148,6 +149,14 @@ public: return obj; } + T* AllocateWithKernel(KernelCore& kernel) { + T* obj = static_cast<T*>(AllocateImpl()); + if (obj != nullptr) { + new (obj) T(kernel); + } + return obj; + } + void Free(T* obj) { FreeImpl(obj); } diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index 82f72a0fe..460b8a714 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -13,6 +13,11 @@ namespace Kernel { +void KSynchronizationObject::Finalize() { + this->OnFinalizeSynchronizationObject(); + KAutoObject::Finalize(); +} + ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects, const s32 num_objects, s64 timeout) { @@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index, return wait_result; } -KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} - -KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name) - : Object{kernel, std::move(name)} {} +KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {} KSynchronizationObject::~KSynchronizationObject() = default; diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h index 5803718fd..a41dd1220 100644 --- a/src/core/hle/kernel/k_synchronization_object.h +++ b/src/core/hle/kernel/k_synchronization_object.h @@ -6,7 +6,7 @@ #include <vector> -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/k_auto_object.h" #include "core/hle/result.h" namespace Kernel { @@ -16,7 +16,9 @@ class Synchronization; class KThread; /// Class that represents a Kernel object that a thread can be waiting on -class KSynchronizationObject : public Object { +class KSynchronizationObject : public KAutoObjectWithList { + KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject); + public: struct ThreadListNode { 
ThreadListNode* next{}; @@ -27,15 +29,18 @@ public: KSynchronizationObject** objects, const s32 num_objects, s64 timeout); + virtual void Finalize() override; + [[nodiscard]] virtual bool IsSignaled() const = 0; [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; protected: explicit KSynchronizationObject(KernelCore& kernel); - explicit KSynchronizationObject(KernelCore& kernel, std::string&& name); virtual ~KSynchronizationObject(); + virtual void OnFinalizeSynchronizationObject() {} + void NotifyAvailable(ResultCode result); void NotifyAvailable() { return this->NotifyAvailable(RESULT_SUCCESS); @@ -46,14 +51,4 @@ private: ThreadListNode* thread_list_tail{}; }; -// Specialization of DynamicObjectCast for KSynchronizationObjects -template <> -inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>( - std::shared_ptr<Object> object) { - if (object != nullptr && object->IsWaitable()) { - return std::static_pointer_cast<KSynchronizationObject>(object); - } - return nullptr; -} - } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index e0f53287c..ef6dfeeca 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -18,17 +18,16 @@ #include "core/core.h" #include "core/cpu_manager.h" #include "core/hardware_properties.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_condition_variable.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_layout.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" #include "core/hle/result.h" @@ -62,11 +61,11 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, namespace Kernel { KThread::KThread(KernelCore& kernel) - : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {} + : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {} KThread::~KThread() = default; ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, - s32 virt_core, Process* owner, ThreadType type) { + s32 virt_core, KProcess* owner, ThreadType type) { // Assert parameters are valid. ASSERT((type == ThreadType::Main) || (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority)); @@ -177,6 +176,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s // Set parent, if relevant. if (owner != nullptr) { parent = owner; + parent->Open(); parent->IncrementThreadCount(); } @@ -209,14 +209,56 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s } ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, - VAddr user_stack_top, s32 prio, s32 core, Process* owner, - ThreadType type) { + VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, + ThreadType type, std::function<void(void*)>&& init_func, + void* init_func_parameter) { // Initialize the thread. R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); + // Initialize host context. 
+ thread->host_context = + std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); + return RESULT_SUCCESS; } +ResultCode KThread::InitializeDummyThread(KThread* thread) { + return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main); +} + +ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { + return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, + Core::CpuManager::GetIdleThreadStartFunc(), + system.GetCpuManager().GetStartFuncParamater()); +} + +ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, + KThreadFunction func, uintptr_t arg, + s32 virt_core) { + return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, + Core::CpuManager::GetSuspendThreadStartFunc(), + system.GetCpuManager().GetStartFuncParamater()); +} + +ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread, + KThreadFunction func, uintptr_t arg, VAddr user_stack_top, + s32 prio, s32 virt_core, KProcess* owner) { + system.Kernel().GlobalSchedulerContext().AddThread(thread); + return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, + ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(), + system.GetCpuManager().GetStartFuncParamater()); +} + +void KThread::PostDestroy(uintptr_t arg) { + KProcess* owner = reinterpret_cast<KProcess*>(arg & ~1ULL); + const bool resource_limit_release_hint = (arg & 1); + const s64 hint_value = (resource_limit_release_hint ? 0 : 1); + if (owner != nullptr) { + owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value); + owner->Close(); + } +} + void KThread::Finalize() { // If the thread has an owner process, unregister it. if (parent != nullptr) { @@ -246,8 +288,10 @@ void KThread::Finalize() { // Decrement the parent process's thread count. if (parent != nullptr) { parent->DecrementThreadCount(); - parent->GetResourceLimit()->Release(LimitableResource::Threads, 1); } + + // Perform inherited finalization. + KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>::Finalize(); } bool KThread::IsSignaled() const { @@ -294,6 +338,9 @@ void KThread::StartTermination() { // Register terminated dpc flag. RegisterDpc(DpcFlag::Terminated); + + // Close the thread. + this->Close(); } void KThread::Pin() { @@ -932,7 +979,7 @@ void KThread::Exit() { // Release the thread resource hint from parent. if (parent != nullptr) { - // TODO(bunnei): Hint that the resource is about to be released. 
+ parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1); resource_limit_release_hint = true; } @@ -995,56 +1042,6 @@ std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { return host_context; } -ResultVal<std::shared_ptr<KThread>> KThread::CreateThread(Core::System& system, - ThreadType type_flags, std::string name, - VAddr entry_point, u32 priority, u64 arg, - s32 processor_id, VAddr stack_top, - Process* owner_process) { - auto& kernel = system.Kernel(); - - std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel); - - if (const auto result = - thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority, - processor_id, owner_process, type_flags); - result.IsError()) { - return result; - } - - thread->name = name; - - auto& scheduler = kernel.GlobalSchedulerContext(); - scheduler.AddThread(thread); - - return MakeResult<std::shared_ptr<KThread>>(std::move(thread)); -} - -ResultVal<std::shared_ptr<KThread>> KThread::CreateThread( - Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority, - u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process, - std::function<void(void*)>&& thread_start_func, void* thread_start_parameter) { - auto thread_result = CreateThread(system, type_flags, name, entry_point, priority, arg, - processor_id, stack_top, owner_process); - - if (thread_result.Succeeded()) { - (*thread_result)->host_context = - std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter); - } - - return thread_result; -} - -ResultVal<std::shared_ptr<KThread>> KThread::CreateUserThread( - Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, u32 priority, - u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process) { - std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc(); - - void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - - return CreateThread(system, type_flags, name, entry_point, priority, arg, processor_id, - stack_top, owner_process, std::move(init_func), init_func_parameter); -} - KThread* GetCurrentThreadPointer(KernelCore& kernel) { return kernel.GetCurrentEmuThread(); } diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index b442dfe57..4145ef56c 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -19,7 +19,7 @@ #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_spin_lock.h" #include "core/hle/kernel/k_synchronization_object.h" -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/slab_helpers.h" #include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" @@ -37,7 +37,7 @@ namespace Kernel { class GlobalSchedulerContext; class KernelCore; -class Process; +class KProcess; class KScheduler; class KThreadQueue; @@ -99,9 +99,13 @@ enum class ThreadWaitReasonForDebugging : u32 { [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); -class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> { +class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KSynchronizationObject>, + public boost::intrusive::list_base_hook<> { + KERNEL_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject); + +private: friend class KScheduler; - friend class Process; + friend class KProcess; public: static constexpr s32 DefaultThreadPriority 
= 44; @@ -115,74 +119,10 @@ public: using ThreadContext64 = Core::ARM_Interface::ThreadContext64; using WaiterList = boost::intrusive::list<KThread>; - /** - * Creates and returns a new thread. - * @param system The instance of the whole system - * @param name The friendly name desired for the thread - * @param entry_point The address at which the thread should start execution - * @param priority The thread's priority - * @param arg User data to pass to the thread - * @param processor_id The ID(s) of the processors on which the thread is desired to be run - * @param stack_top The address of the thread's stack top - * @param owner_process The parent process for the thread, if null, it's a kernel thread - * @return A shared pointer to the newly created thread - */ - [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread( - Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process); - - /** - * Creates and returns a new thread, with a specified entry point. - * @param system The instance of the whole system - * @param name The friendly name desired for the thread - * @param entry_point The address at which the thread should start execution - * @param priority The thread's priority - * @param arg User data to pass to the thread - * @param processor_id The ID(s) of the processors on which the thread is desired to be run - * @param stack_top The address of the thread's stack top - * @param owner_process The parent process for the thread, if null, it's a kernel thread - * @param thread_start_func The function where the host context will start. - * @param thread_start_parameter The parameter which will passed to host context on init - * @return A shared pointer to the newly created thread - */ - [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateThread( - Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process, - std::function<void(void*)>&& thread_start_func, void* thread_start_parameter); - - /** - * Creates and returns a new thread for the emulated "user" process. 
- * @param system The instance of the whole system - * @param name The friendly name desired for the thread - * @param entry_point The address at which the thread should start execution - * @param priority The thread's priority - * @param arg User data to pass to the thread - * @param processor_id The ID(s) of the processors on which the thread is desired to be run - * @param stack_top The address of the thread's stack top - * @param owner_process The parent process for the thread, if null, it's a kernel thread - * @return A shared pointer to the newly created thread - */ - [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> CreateUserThread( - Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, - u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process); - - [[nodiscard]] std::string GetName() const override { - return name; - } - void SetName(std::string new_name) { name = std::move(new_name); } - [[nodiscard]] std::string GetTypeName() const override { - return "Thread"; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::Thread; - [[nodiscard]] HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - /** * Gets the thread's current priority * @return The current thread's priority @@ -257,10 +197,6 @@ public: void Suspend(); - void Finalize() override; - - bool IsSignaled() const override; - void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { synced_object = obj; wait_result = wait_res; @@ -354,11 +290,11 @@ public: current_core_id = core; } - [[nodiscard]] Process* GetOwnerProcess() { + [[nodiscard]] KProcess* GetOwnerProcess() { return parent; } - [[nodiscard]] const Process* GetOwnerProcess() const { + [[nodiscard]] const KProcess* GetOwnerProcess() const { return parent; } @@ -422,6 +358,40 @@ public: return termination_requested || GetRawState() == ThreadState::Terminated; } + [[nodiscard]] virtual u64 GetId() const override final { + return this->GetThreadID(); + } + + [[nodiscard]] virtual bool IsInitialized() const override { + return initialized; + } + + [[nodiscard]] virtual uintptr_t GetPostDestroyArgument() const override { + return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 
1 : 0); + } + + virtual void Finalize() override; + + [[nodiscard]] virtual bool IsSignaled() const override; + + static void PostDestroy(uintptr_t arg); + + [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread); + + [[nodiscard]] static ResultCode InitializeIdleThread(Core::System& system, KThread* thread, + s32 virt_core); + + [[nodiscard]] static ResultCode InitializeHighPriorityThread(Core::System& system, + KThread* thread, + KThreadFunction func, + uintptr_t arg, s32 virt_core); + + [[nodiscard]] static ResultCode InitializeUserThread(Core::System& system, KThread* thread, + KThreadFunction func, uintptr_t arg, + VAddr user_stack_top, s32 prio, + s32 virt_core, KProcess* owner); + +public: struct StackParameters { u8 svc_permission[0x10]; std::atomic<u8> dpc_flags; @@ -671,11 +641,13 @@ private: void StartTermination(); [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, - s32 prio, s32 virt_core, Process* owner, ThreadType type); + s32 prio, s32 virt_core, KProcess* owner, ThreadType type); [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, - s32 core, Process* owner, ThreadType type); + s32 core, KProcess* owner, ThreadType type, + std::function<void(void*)>&& init_func, + void* init_func_parameter); static void RestorePriority(KernelCore& kernel, KThread* thread); @@ -697,7 +669,7 @@ private: std::atomic<s64> cpu_time{}; KSynchronizationObject* synced_object{}; VAddr address_key{}; - Process* parent{}; + KProcess* parent{}; VAddr kernel_stack_top{}; u32* light_ipc_data{}; VAddr tls_address{}; @@ -742,7 +714,6 @@ private: VAddr mutex_wait_address_for_debugging{}; ThreadWaitReasonForDebugging wait_reason_for_debugging{}; ThreadType thread_type_for_debugging{}; - std::string name; public: using ConditionVariableThreadTreeType = ConditionVariableThreadTree; diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp new file mode 100644 index 000000000..201617d32 --- /dev/null +++ b/src/core/hle/kernel/k_transfer_memory.cpp @@ -0,0 +1,45 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_resource_limit.h" +#include "core/hle/kernel/k_transfer_memory.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +KTransferMemory::KTransferMemory(KernelCore& kernel) + : KAutoObjectWithSlabHeapAndContainer{kernel} {} + +KTransferMemory::~KTransferMemory() = default; + +ResultCode KTransferMemory::Initialize(VAddr address_, std::size_t size_, + Svc::MemoryPermission owner_perm_) { + // Set members. + owner = kernel.CurrentProcess(); + + // TODO(bunnei): Lock for transfer memory + + // Set remaining tracking members. + owner->Open(); + owner_perm = owner_perm_; + address = address_; + size = size_; + is_initialized = true; + + return RESULT_SUCCESS; +} + +void KTransferMemory::Finalize() { + // Perform inherited finalization. 
+ KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList>::Finalize(); +} + +void KTransferMemory::PostDestroy(uintptr_t arg) { + KProcess* owner = reinterpret_cast<KProcess*>(arg); + owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1); + owner->Close(); +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h new file mode 100644 index 000000000..f56398b9c --- /dev/null +++ b/src/core/hle/kernel/k_transfer_memory.h @@ -0,0 +1,66 @@ +// Copyright 2021 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> + +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/kernel/svc_types.h" +#include "core/hle/result.h" + +union ResultCode; + +namespace Core::Memory { +class Memory; +} + +namespace Kernel { + +class KernelCore; +class KProcess; + +class KTransferMemory final + : public KAutoObjectWithSlabHeapAndContainer<KTransferMemory, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); + +public: + explicit KTransferMemory(KernelCore& kernel); + virtual ~KTransferMemory() override; + + ResultCode Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); + + virtual void Finalize() override; + + virtual bool IsInitialized() const override { + return is_initialized; + } + + virtual uintptr_t GetPostDestroyArgument() const override { + return reinterpret_cast<uintptr_t>(owner); + } + + static void PostDestroy(uintptr_t arg); + + KProcess* GetOwner() const { + return owner; + } + + VAddr GetSourceAddress() const { + return address; + } + + size_t GetSize() const { + return is_initialized ? size * PageSize : 0; + } + +private: + KProcess* owner{}; + VAddr address{}; + Svc::MemoryPermission owner_perm{}; + size_t size{}; + bool is_initialized{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp index 25c52edb2..a430e0661 100644 --- a/src/core/hle/kernel/k_writable_event.cpp +++ b/src/core/hle/kernel/k_writable_event.cpp @@ -8,20 +8,28 @@ namespace Kernel { -KWritableEvent::KWritableEvent(KernelCore& kernel, std::string&& name) - : Object{kernel, std::move(name)} {} +KWritableEvent::KWritableEvent(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {} + KWritableEvent::~KWritableEvent() = default; -void KWritableEvent::Initialize(KEvent* parent_) { +void KWritableEvent::Initialize(KEvent* parent_, std::string&& name_) { parent = parent_; + name = std::move(name_); + parent->GetReadableEvent().Open(); } ResultCode KWritableEvent::Signal() { - return parent->GetReadableEvent()->Signal(); + return parent->GetReadableEvent().Signal(); } ResultCode KWritableEvent::Clear() { - return parent->GetReadableEvent()->Clear(); + return parent->GetReadableEvent().Clear(); +} + +void KWritableEvent::Destroy() { + // Close our references. 
+ parent->GetReadableEvent().Close(); + parent->Close(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h index 518f5448d..154d2382c 100644 --- a/src/core/hle/kernel/k_writable_event.h +++ b/src/core/hle/kernel/k_writable_event.h @@ -4,7 +4,8 @@ #pragma once -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/slab_helpers.h" #include "core/hle/result.h" namespace Kernel { @@ -12,24 +13,19 @@ namespace Kernel { class KernelCore; class KEvent; -class KWritableEvent final : public Object { +class KWritableEvent final + : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); + public: - explicit KWritableEvent(KernelCore& kernel, std::string&& name); + explicit KWritableEvent(KernelCore& kernel); ~KWritableEvent() override; - std::string GetTypeName() const override { - return "KWritableEvent"; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::WritableEvent; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - void Initialize(KEvent* parent_); + virtual void Destroy() override; - void Finalize() override {} + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + void Initialize(KEvent* parent_, std::string&& name_); ResultCode Signal(); ResultCode Clear(); diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 5c4f45ab4..32bbf2d9b 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -26,10 +26,12 @@ #include "core/cpu_manager.h" #include "core/device_memory.h" #include "core/hardware_properties.h" -#include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/init/init_slab_setup.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_shared_memory.h" @@ -37,7 +39,6 @@ #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/service_thread.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" @@ -51,7 +52,7 @@ namespace Kernel { struct KernelCore::Impl { explicit Impl(Core::System& system, KernelCore& kernel) - : time_manager{system}, global_handle_table{kernel}, system{system} {} + : time_manager{system}, object_list_container{kernel}, system{system} {} void SetMulticore(bool is_multicore) { this->is_multicore = is_multicore; @@ -59,8 +60,7 @@ struct KernelCore::Impl { void Initialize(KernelCore& kernel) { global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); - - RegisterHostThread(); + global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel); service_thread_manager = std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager"); @@ -69,14 +69,20 @@ struct KernelCore::Impl { InitializePhysicalCores(); // Derive the initial memory layout from the emulated board + Init::InitializeSlabResourceCounts(kernel); KMemoryLayout memory_layout; DeriveInitialMemoryLayout(memory_layout); - InitializeMemoryLayout(memory_layout); + Init::InitializeSlabHeaps(system, 
memory_layout); + + // Initialize kernel memory and resources. InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); - InitializeSlabHeaps(); + InitializeMemoryLayout(memory_layout); + InitializePageSlab(); InitializeSchedulers(); InitializeSuspendThreads(); InitializePreemption(kernel); + + RegisterHostThread(); } void InitializeCores() { @@ -93,34 +99,49 @@ struct KernelCore::Impl { service_threads.clear(); next_object_id = 0; - next_kernel_process_id = Process::InitialKIPIDMin; - next_user_process_id = Process::ProcessIDMin; + next_kernel_process_id = KProcess::InitialKIPIDMin; + next_user_process_id = KProcess::ProcessIDMin; next_thread_id = 1; - for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - if (suspend_threads[i]) { - suspend_threads[i].reset(); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + if (suspend_threads[core_id]) { + suspend_threads[core_id]->Close(); + suspend_threads[core_id] = nullptr; } + + schedulers[core_id].reset(); } cores.clear(); - current_process = nullptr; + if (current_process) { + current_process->Close(); + current_process = nullptr; + } - global_handle_table.Clear(); + global_handle_table.reset(); preemption_event = nullptr; + for (auto& iter : named_ports) { + iter.second->Close(); + } named_ports.clear(); exclusive_monitor.reset(); - hid_shared_mem = nullptr; - font_shared_mem = nullptr; - irs_shared_mem = nullptr; - time_shared_mem = nullptr; - - system_resource_limit = nullptr; + // Cleanup persistent kernel objects + auto CleanupObject = [](KAutoObject* obj) { + if (obj) { + obj->Close(); + obj = nullptr; + } + }; + CleanupObject(hid_shared_mem); + CleanupObject(font_shared_mem); + CleanupObject(irs_shared_mem); + CleanupObject(time_shared_mem); + CleanupObject(system_resource_limit); // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others next_host_thread_id = Core::Hardware::NUM_CPU_CORES; @@ -145,7 +166,9 @@ struct KernelCore::Impl { void InitializeSystemResourceLimit(KernelCore& kernel, const Core::Timing::CoreTiming& core_timing, const KMemoryLayout& memory_layout) { - system_resource_limit = std::make_shared<KResourceLimit>(kernel, core_timing); + system_resource_limit = KResourceLimit::Create(system.Kernel()); + system_resource_limit->Initialize(&core_timing); + const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes(); // If setting the default system values fails, then something seriously wrong has occurred. 
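[Editor's note, not part of the diff] The kernel.cpp hunks above replace std::shared_ptr ownership of persistent kernel objects with intrusive Open()/Close() reference counting (see the CleanupObject lambda calling obj->Close()). The following is a minimal illustrative sketch of that idiom only; the class and member names are hypothetical stand-ins, not yuzu's actual KAutoObject.

    #include <atomic>

    class RefCounted {
    public:
        virtual ~RefCounted() = default;

        // Take an additional reference.
        void Open() {
            ref_count.fetch_add(1, std::memory_order_relaxed);
        }

        // Drop a reference; the final Close() destroys the object.
        void Close() {
            if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                Destroy();
            }
        }

    protected:
        virtual void Destroy() { delete this; }

    private:
        std::atomic<int> ref_count{1}; // the creator holds the initial reference
    };

Under this scheme a container that stores a raw pointer calls Open() when it inserts the object and Close() when it removes it, which is why the shutdown path above now closes each named port and shared-memory object explicitly instead of resetting a shared_ptr.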
@@ -189,19 +212,16 @@ struct KernelCore::Impl { } void InitializeSuspendThreads() { - for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - std::string name = "Suspend Thread Id:" + std::to_string(i); - std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); - void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); - auto thread_res = KThread::CreateThread( - system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0, - nullptr, std::move(init_func), init_func_parameter); - - suspend_threads[i] = std::move(thread_res).Unwrap(); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + suspend_threads[core_id] = KThread::Create(system.Kernel()); + ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {}, + core_id) + .IsSuccess()); + suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id)); } } - void MakeCurrentProcess(Process* process) { + void MakeCurrentProcess(KProcess* process) { current_process = process; if (process == nullptr) { return; @@ -232,11 +252,15 @@ struct KernelCore::Impl { // Gets the dummy KThread for the caller, allocating a new one if this is the first time KThread* GetHostDummyThread() { - const thread_local auto thread = - KThread::CreateThread( - system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0, - KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr) - .Unwrap(); + auto make_thread = [this]() { + std::unique_ptr<KThread> thread = std::make_unique<KThread>(system.Kernel()); + KAutoObject::Create(thread.get()); + ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess()); + thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); + return std::move(thread); + }; + + thread_local auto thread = make_thread(); return thread.get(); } @@ -371,7 +395,8 @@ struct KernelCore::Impl { const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); // Determine the size of the slab region. - const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); + const size_t slab_region_size = + Common::AlignUp(Init::CalculateTotalSlabHeapSize(system.Kernel()), PageSize); ASSERT(slab_region_size <= resource_region_size); // Setup the slab region. 
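[Editor's note, not part of the diff] The InitializeSuspendThreads and GetHostDummyThread hunks above switch from factory functions that return shared_ptr to a two-phase pattern: allocate the object first (KThread::Create / KAutoObject::Create), then run a fallible static Initialize step and check its ResultCode. A minimal sketch of that shape, with hypothetical types standing in for the real kernel API:

    #include <cstdio>

    struct Result {
        bool ok;
        bool IsSuccess() const { return ok; }
    };

    struct Worker {
        int core = -1;

        // Second, fallible phase: runs after the object already exists.
        Result Initialize(int virt_core) {
            if (virt_core < 0) {
                return {false};
            }
            core = virt_core;
            return {true};
        }
    };

    int main() {
        Worker* w = new Worker();           // stand-in for pool/slab allocation via Create()
        const Result r = w->Initialize(2);  // initialization can fail independently
        if (!r.IsSuccess()) {
            delete w;                       // caller releases the allocation on failure
            return 1;
        }
        std::printf("initialized on core %d\n", w->core);
        delete w;
        return 0;
    }

Separating allocation from initialization is what allows the diff's ASSERT(...IsSuccess()) checks on InitializeHighPriorityThread and InitializeDummyThread, and it matches how the new slab heaps hand out raw storage before the object is usable.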
@@ -569,25 +594,30 @@ struct KernelCore::Impl { const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; - hid_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, - "HID:SharedMemory"); - font_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, - "Font:SharedMemory"); - irs_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, - "IRS:SharedMemory"); - time_shared_mem = Kernel::KSharedMemory::Create( - system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, - KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, - "Time:SharedMemory"); + hid_shared_mem = KSharedMemory::Create(system.Kernel()); + font_shared_mem = KSharedMemory::Create(system.Kernel()); + irs_shared_mem = KSharedMemory::Create(system.Kernel()); + time_shared_mem = KSharedMemory::Create(system.Kernel()); + + hid_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + {hid_phys_addr, hid_size / PageSize}, + Svc::MemoryPermission::None, Svc::MemoryPermission::Read, + hid_phys_addr, hid_size, "HID:SharedMemory"); + font_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + {font_phys_addr, font_size / PageSize}, + Svc::MemoryPermission::None, Svc::MemoryPermission::Read, + font_phys_addr, font_size, "Font:SharedMemory"); + irs_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + {irs_phys_addr, irs_size / PageSize}, + Svc::MemoryPermission::None, Svc::MemoryPermission::Read, + irs_phys_addr, irs_size, "IRS:SharedMemory"); + time_shared_mem->Initialize(system.Kernel(), system.DeviceMemory(), nullptr, + {time_phys_addr, time_size / PageSize}, + Svc::MemoryPermission::None, Svc::MemoryPermission::Read, + time_phys_addr, time_size, "Time:SharedMemory"); } - void InitializeSlabHeaps() { + void InitializePageSlab() { // Allocate slab heaps user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>(); @@ -596,30 +626,33 @@ struct KernelCore::Impl { // Reserve slab heaps ASSERT( system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); - // Initialize slab heaps + // Initialize slab heap user_slab_heap_pages->Initialize( system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), user_slab_heap_size); } std::atomic<u32> next_object_id{0}; - std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; - std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; + std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin}; + std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin}; std::atomic<u64> next_thread_id{1}; // Lists all processes that exist in the current session. 
- std::vector<std::shared_ptr<Process>> process_list; - Process* current_process = nullptr; + std::vector<KProcess*> process_list; + KProcess* current_process{}; std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; Kernel::TimeManager time_manager; - std::shared_ptr<KResourceLimit> system_resource_limit; + Init::KSlabResourceCounts slab_resource_counts{}; + KResourceLimit* system_resource_limit{}; std::shared_ptr<Core::Timing::EventType> preemption_event; // This is the kernel's handle table or supervisor handle table which // stores all the objects in place. - HandleTable global_handle_table; + std::unique_ptr<KHandleTable> global_handle_table; + + KAutoObjectWithListContainer object_list_container; /// Map of named ports managed by the kernel, which can be retrieved using /// the ConnectToPort SVC. @@ -636,10 +669,10 @@ struct KernelCore::Impl { std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages; // Shared memory for services - std::shared_ptr<Kernel::KSharedMemory> hid_shared_mem; - std::shared_ptr<Kernel::KSharedMemory> font_shared_mem; - std::shared_ptr<Kernel::KSharedMemory> irs_shared_mem; - std::shared_ptr<Kernel::KSharedMemory> time_shared_mem; + Kernel::KSharedMemory* hid_shared_mem{}; + Kernel::KSharedMemory* font_shared_mem{}; + Kernel::KSharedMemory* irs_shared_mem{}; + Kernel::KSharedMemory* time_shared_mem{}; // Threads used for services std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; @@ -648,7 +681,7 @@ struct KernelCore::Impl { // the release of itself std::unique_ptr<Common::ThreadWorker> service_thread_manager; - std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; + std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads; std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; @@ -663,15 +696,14 @@ struct KernelCore::Impl { }; KernelCore::KernelCore(Core::System& system) : impl{std::make_unique<Impl>(system, *this)} {} -KernelCore::~KernelCore() { - Shutdown(); -} +KernelCore::~KernelCore() = default; void KernelCore::SetMulticore(bool is_multicore) { impl->SetMulticore(is_multicore); } void KernelCore::Initialize() { + slab_heap_container = std::make_unique<SlabHeapContainer>(); impl->Initialize(*this); } @@ -683,31 +715,35 @@ void KernelCore::Shutdown() { impl->Shutdown(); } -std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const { +const KResourceLimit* KernelCore::GetSystemResourceLimit() const { return impl->system_resource_limit; } -std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { - return impl->global_handle_table.Get<KThread>(handle); +KResourceLimit* KernelCore::GetSystemResourceLimit() { + return impl->system_resource_limit; +} + +KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const { + return impl->global_handle_table->GetObject<KThread>(handle); } -void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { - impl->process_list.push_back(std::move(process)); +void KernelCore::AppendNewProcess(KProcess* process) { + impl->process_list.push_back(process); } -void KernelCore::MakeCurrentProcess(Process* process) { +void KernelCore::MakeCurrentProcess(KProcess* process) { impl->MakeCurrentProcess(process); } -Process* KernelCore::CurrentProcess() { +KProcess* KernelCore::CurrentProcess() { return impl->current_process; } -const 
Process* KernelCore::CurrentProcess() const { +const KProcess* KernelCore::CurrentProcess() const { return impl->current_process; } -const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const { +const std::vector<KProcess*>& KernelCore::GetProcessList() const { return impl->process_list; } @@ -781,6 +817,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { return *impl->exclusive_monitor; } +KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { + return impl->object_list_container; +} + +const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const { + return impl->object_list_container; +} + void KernelCore::InvalidateAllInstructionCaches() { for (auto& physical_core : impl->cores) { physical_core.ArmInterface().ClearInstructionCache(); @@ -800,8 +844,9 @@ void KernelCore::PrepareReschedule(std::size_t id) { // TODO: Reimplement, this } -void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) { - impl->named_ports.emplace(std::move(name), std::move(port)); +void KernelCore::AddNamedPort(std::string name, KClientPort* port) { + port->Open(); + impl->named_ports.emplace(std::move(name), port); } KernelCore::NamedPortTable::iterator KernelCore::FindNamedPort(const std::string& name) { @@ -833,12 +878,12 @@ u64 KernelCore::CreateNewUserProcessID() { return impl->next_user_process_id++; } -Kernel::HandleTable& KernelCore::GlobalHandleTable() { - return impl->global_handle_table; +KHandleTable& KernelCore::GlobalHandleTable() { + return *impl->global_handle_table; } -const Kernel::HandleTable& KernelCore::GlobalHandleTable() const { - return impl->global_handle_table; +const KHandleTable& KernelCore::GlobalHandleTable() const { + return *impl->global_handle_table; } void KernelCore::RegisterCoreThread(std::size_t core_id) { @@ -910,9 +955,9 @@ void KernelCore::Suspend(bool in_suspention) { { KScopedSchedulerLock lock(*this); const auto state = should_suspend ? 
ThreadState::Runnable : ThreadState::Waiting; - for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { - impl->suspend_threads[i]->SetState(state); - impl->suspend_threads[i]->SetWaitReasonForDebugging( + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + impl->suspend_threads[core_id]->SetState(state); + impl->suspend_threads[core_id]->SetWaitReasonForDebugging( ThreadWaitReasonForDebugging::Suspended); } } @@ -952,6 +997,14 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi }); } +Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() { + return impl->slab_resource_counts; +} + +const Init::KSlabResourceCounts& KernelCore::SlabResourceCounts() const { + return impl->slab_resource_counts; +} + bool KernelCore::IsPhantomModeForSingleCore() const { return impl->IsPhantomModeForSingleCore(); } @@ -960,4 +1013,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) { impl->SetIsPhantomModeForSingleCore(value); } +Core::System& KernelCore::System() { + return impl->system; +} + +const Core::System& KernelCore::System() const { + return impl->system; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index a500e63bc..51aaccbc7 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -11,8 +11,10 @@ #include <vector> #include "core/arm/cpu_interrupt_handler.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_slab_heap.h" #include "core/hle/kernel/memory_types.h" -#include "core/hle/kernel/object.h" +#include "core/hle/kernel/svc_common.h" namespace Core { class CPUInterruptHandler; @@ -27,20 +29,32 @@ struct EventType; namespace Kernel { -class ClientPort; +class KClientPort; class GlobalSchedulerContext; -class HandleTable; +class KAutoObjectWithListContainer; +class KClientSession; +class KEvent; +class KHandleTable; +class KLinkedListNode; class KMemoryManager; +class KPort; +class KProcess; class KResourceLimit; class KScheduler; +class KSession; class KSharedMemory; class KThread; +class KTransferMemory; +class KWritableEvent; class PhysicalCore; -class Process; class ServiceThread; class Synchronization; class TimeManager; +namespace Init { +struct KSlabResourceCounts; +} + template <typename T> class KSlabHeap; @@ -51,7 +65,7 @@ constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63}; /// Represents a single instance of the kernel. class KernelCore { private: - using NamedPortTable = std::unordered_map<std::string, std::shared_ptr<ClientPort>>; + using NamedPortTable = std::unordered_map<std::string, KClientPort*>; public: /// Constructs an instance of the kernel using the given System @@ -83,25 +97,28 @@ public: void Shutdown(); /// Retrieves a shared pointer to the system resource limit instance. - std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const; + const KResourceLimit* GetSystemResourceLimit() const; + + /// Retrieves a shared pointer to the system resource limit instance. + KResourceLimit* GetSystemResourceLimit(); /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table. - std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; + KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const; /// Adds the given shared pointer to an internal list of active processes. 
- void AppendNewProcess(std::shared_ptr<Process> process); + void AppendNewProcess(KProcess* process); /// Makes the given process the new current process. - void MakeCurrentProcess(Process* process); + void MakeCurrentProcess(KProcess* process); /// Retrieves a pointer to the current process. - Process* CurrentProcess(); + KProcess* CurrentProcess(); /// Retrieves a const pointer to the current process. - const Process* CurrentProcess() const; + const KProcess* CurrentProcess() const; /// Retrieves the list of processes. - const std::vector<std::shared_ptr<Process>>& GetProcessList() const; + const std::vector<KProcess*>& GetProcessList() const; /// Gets the sole instance of the global scheduler Kernel::GlobalSchedulerContext& GlobalSchedulerContext(); @@ -143,6 +160,10 @@ public: const Core::ExclusiveMonitor& GetExclusiveMonitor() const; + KAutoObjectWithListContainer& ObjectListContainer(); + + const KAutoObjectWithListContainer& ObjectListContainer() const; + std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts(); const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const; @@ -152,7 +173,7 @@ public: void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size); /// Adds a port to the named port table - void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port); + void AddNamedPort(std::string name, KClientPort* port); /// Finds a port within the named port table with the given name. NamedPortTable::iterator FindNamedPort(const std::string& name); @@ -225,9 +246,10 @@ public: /** * Creates an HLE service thread, which are used to execute service routines asynchronously. - * While these are allocated per ServerSession, these need to be owned and managed outside of - * ServerSession to avoid a circular dependency. - * @param name String name for the ServerSession creating this thread, used for debug purposes. + * While these are allocated per ServerSession, these need to be owned and managed outside + * of ServerSession to avoid a circular dependency. + * @param name String name for the ServerSession creating this thread, used for debug + * purposes. * @returns The a weak pointer newly created service thread. */ std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); @@ -243,9 +265,45 @@ public: bool IsPhantomModeForSingleCore() const; void SetIsPhantomModeForSingleCore(bool value); + Core::System& System(); + const Core::System& System() const; + + /// Gets the slab heap for the specified kernel object type. 
+ template <typename T> + KSlabHeap<T>& SlabHeap() { + if constexpr (std::is_same_v<T, KClientSession>) { + return slab_heap_container->client_session; + } else if constexpr (std::is_same_v<T, KEvent>) { + return slab_heap_container->event; + } else if constexpr (std::is_same_v<T, KLinkedListNode>) { + return slab_heap_container->linked_list_node; + } else if constexpr (std::is_same_v<T, KPort>) { + return slab_heap_container->port; + } else if constexpr (std::is_same_v<T, KProcess>) { + return slab_heap_container->process; + } else if constexpr (std::is_same_v<T, KResourceLimit>) { + return slab_heap_container->resource_limit; + } else if constexpr (std::is_same_v<T, KSession>) { + return slab_heap_container->session; + } else if constexpr (std::is_same_v<T, KSharedMemory>) { + return slab_heap_container->shared_memory; + } else if constexpr (std::is_same_v<T, KThread>) { + return slab_heap_container->thread; + } else if constexpr (std::is_same_v<T, KTransferMemory>) { + return slab_heap_container->transfer_memory; + } else if constexpr (std::is_same_v<T, KWritableEvent>) { + return slab_heap_container->writeable_event; + } + } + + /// Gets the current slab resource counts. + Init::KSlabResourceCounts& SlabResourceCounts(); + + /// Gets the current slab resource counts. + const Init::KSlabResourceCounts& SlabResourceCounts() const; + private: - friend class Object; - friend class Process; + friend class KProcess; friend class KThread; /// Creates a new object ID, incrementing the internal object ID counter. @@ -261,14 +319,33 @@ private: u64 CreateNewThreadID(); /// Provides a reference to the global handle table. - Kernel::HandleTable& GlobalHandleTable(); + KHandleTable& GlobalHandleTable(); /// Provides a const reference to the global handle table. - const Kernel::HandleTable& GlobalHandleTable() const; + const KHandleTable& GlobalHandleTable() const; struct Impl; std::unique_ptr<Impl> impl; + bool exception_exited{}; + +private: + /// Helper to encapsulate all slab heaps in a single heap allocated container + struct SlabHeapContainer { + KSlabHeap<KClientSession> client_session; + KSlabHeap<KEvent> event; + KSlabHeap<KLinkedListNode> linked_list_node; + KSlabHeap<KPort> port; + KSlabHeap<KProcess> process; + KSlabHeap<KResourceLimit> resource_limit; + KSlabHeap<KSession> session; + KSlabHeap<KSharedMemory> shared_memory; + KSlabHeap<KThread> thread; + KSlabHeap<KTransferMemory> transfer_memory; + KSlabHeap<KWritableEvent> writeable_event; + }; + + std::unique_ptr<SlabHeapContainer> slab_heap_container; }; } // namespace Kernel diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp deleted file mode 100644 index d7f40c403..000000000 --- a/src/core/hle/kernel/object.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include "common/assert.h" -#include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/object.h" - -namespace Kernel { - -Object::Object(KernelCore& kernel_) - : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{"[UNKNOWN KERNEL OBJECT]"} {} -Object::Object(KernelCore& kernel_, std::string&& name_) - : kernel{kernel_}, object_id{kernel_.CreateNewObjectID()}, name{std::move(name_)} {} -Object::~Object() = default; - -bool Object::IsWaitable() const { - switch (GetHandleType()) { - case HandleType::ReadableEvent: - case HandleType::Thread: - case HandleType::Process: - case HandleType::ServerPort: - case HandleType::ServerSession: - return true; - - case HandleType::Unknown: - case HandleType::Event: - case HandleType::WritableEvent: - case HandleType::SharedMemory: - case HandleType::TransferMemory: - case HandleType::ResourceLimit: - case HandleType::ClientPort: - case HandleType::ClientSession: - case HandleType::Session: - return false; - } - - UNREACHABLE(); - return false; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h deleted file mode 100644 index 501e58b33..000000000 --- a/src/core/hle/kernel/object.h +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2018 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <atomic> -#include <memory> -#include <string> - -#include "common/common_types.h" - -namespace Kernel { - -class KernelCore; - -using Handle = u32; - -enum class HandleType : u32 { - Unknown, - Event, - WritableEvent, - ReadableEvent, - SharedMemory, - TransferMemory, - Thread, - Process, - ResourceLimit, - ClientPort, - ServerPort, - ClientSession, - ServerSession, - Session, -}; - -class Object : NonCopyable, public std::enable_shared_from_this<Object> { -public: - explicit Object(KernelCore& kernel_); - explicit Object(KernelCore& kernel_, std::string&& name_); - virtual ~Object(); - - /// Returns a unique identifier for the object. For debugging purposes only. - u32 GetObjectId() const { - return object_id.load(std::memory_order_relaxed); - } - - virtual std::string GetTypeName() const { - return "[BAD KERNEL OBJECT TYPE]"; - } - virtual std::string GetName() const { - return name; - } - virtual HandleType GetHandleType() const = 0; - - void Close() { - // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use - // when we implement KAutoObject instead of using shared_ptr. - } - - /** - * Check if a thread can wait on the object - * @return True if a thread can wait on the object, otherwise false - */ - bool IsWaitable() const; - - virtual void Finalize() = 0; - -protected: - /// The kernel instance this object was created under. - KernelCore& kernel; - -private: - std::atomic<u32> object_id{0}; - std::string name; -}; - -template <typename T> -std::shared_ptr<T> SharedFrom(T* raw) { - if (raw == nullptr) - return nullptr; - return std::static_pointer_cast<T>(raw->shared_from_this()); -} - -/** - * Attempts to downcast the given Object pointer to a pointer to T. - * @return Derived pointer to the object, or `nullptr` if `object` isn't of type T. 
- */ -template <typename T> -inline std::shared_ptr<T> DynamicObjectCast(std::shared_ptr<Object> object) { - if (object != nullptr && object->GetHandleType() == T::HANDLE_TYPE) { - return std::static_pointer_cast<T>(object); - } - return nullptr; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 1006ee50c..fcb8b1ea5 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp @@ -6,7 +6,7 @@ #include "common/bit_util.h" #include "common/logging/log.h" -#include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/svc_results.h" @@ -99,7 +99,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() { interrupt_capabilities.set(); // Allow using the maximum possible amount of handles - handle_table_size = static_cast<s32>(HandleTable::MAX_COUNT); + handle_table_size = static_cast<s32>(KHandleTable::MaxTableSize); // Allow all debugging capabilities. is_debuggable = true; @@ -159,7 +159,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s const auto type = GetCapabilityType(flag); if (type == CapabilityType::Unset) { - return ResultInvalidCapabilityDescriptor; + return ResultInvalidArgument; } // Bail early on ignorable entries, as one would expect, @@ -202,7 +202,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s } LOG_ERROR(Kernel, "Invalid capability type! type={}", type); - return ResultInvalidCapabilityDescriptor; + return ResultInvalidArgument; } void ProcessCapabilities::Clear() { @@ -225,7 +225,7 @@ ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) { if (priority_mask != 0 || core_mask != 0) { LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}", priority_mask, core_mask); - return ResultInvalidCapabilityDescriptor; + return ResultInvalidArgument; } const u32 core_num_min = (flags >> 16) & 0xFF; @@ -329,7 +329,7 @@ ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) { const u32 reserved = flags >> 17; if (reserved != 0) { LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); - return ResultReservedValue; + return ResultReservedUsed; } program_type = static_cast<ProgramType>((flags >> 14) & 0b111); @@ -349,7 +349,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) { LOG_ERROR(Kernel, "Kernel version is non zero or flags are too small! major_version={}, flags={}", major_version, flags); - return ResultInvalidCapabilityDescriptor; + return ResultInvalidArgument; } kernel_version = flags; @@ -360,7 +360,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) { const u32 reserved = flags >> 26; if (reserved != 0) { LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved); - return ResultReservedValue; + return ResultReservedUsed; } handle_table_size = static_cast<s32>((flags >> 16) & 0x3FF); @@ -371,7 +371,7 @@ ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) { const u32 reserved = flags >> 19; if (reserved != 0) { LOG_ERROR(Kernel, "Reserved value is non-zero! 
reserved={}", reserved); - return ResultReservedValue; + return ResultReservedUsed; } is_debuggable = (flags & 0x20000) != 0; diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp deleted file mode 100644 index 5d17346ad..000000000 --- a/src/core/hle/kernel/server_port.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include <tuple> -#include "common/assert.h" -#include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/k_thread.h" -#include "core/hle/kernel/object.h" -#include "core/hle/kernel/server_port.h" -#include "core/hle/kernel/server_session.h" -#include "core/hle/kernel/svc_results.h" - -namespace Kernel { - -ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {} -ServerPort::~ServerPort() = default; - -ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() { - if (pending_sessions.empty()) { - return ResultNotFound; - } - - auto session = std::move(pending_sessions.back()); - pending_sessions.pop_back(); - return MakeResult(std::move(session)); -} - -void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) { - pending_sessions.push_back(std::move(pending_session)); - if (pending_sessions.size() == 1) { - NotifyAvailable(); - } -} - -bool ServerPort::IsSignaled() const { - return !pending_sessions.empty(); -} - -ServerPort::PortPair ServerPort::CreatePortPair(KernelCore& kernel, u32 max_sessions, - std::string name) { - std::shared_ptr<ServerPort> server_port = std::make_shared<ServerPort>(kernel); - std::shared_ptr<ClientPort> client_port = std::make_shared<ClientPort>(kernel); - - server_port->name = name + "_Server"; - client_port->name = name + "_Client"; - client_port->server_port = server_port; - client_port->max_sessions = max_sessions; - client_port->active_sessions = 0; - - return std::make_pair(std::move(server_port), std::move(client_port)); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h deleted file mode 100644 index 29b4f2509..000000000 --- a/src/core/hle/kernel/server_port.h +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <memory> -#include <string> -#include <utility> -#include <vector> -#include "common/common_types.h" -#include "core/hle/kernel/k_synchronization_object.h" -#include "core/hle/kernel/object.h" -#include "core/hle/result.h" - -namespace Kernel { - -class ClientPort; -class KernelCore; -class ServerSession; -class SessionRequestHandler; - -class ServerPort final : public KSynchronizationObject { -public: - explicit ServerPort(KernelCore& kernel); - ~ServerPort() override; - - using HLEHandler = std::shared_ptr<SessionRequestHandler>; - using PortPair = std::pair<std::shared_ptr<ServerPort>, std::shared_ptr<ClientPort>>; - - /** - * Creates a pair of ServerPort and an associated ClientPort. - * - * @param kernel The kernel instance to create the port pair under. 
- * @param max_sessions Maximum number of sessions to the port - * @param name Optional name of the ports - * @return The created port tuple - */ - static PortPair CreatePortPair(KernelCore& kernel, u32 max_sessions, - std::string name = "UnknownPort"); - - std::string GetTypeName() const override { - return "ServerPort"; - } - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::ServerPort; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - /** - * Accepts a pending incoming connection on this port. If there are no pending sessions, will - * return ERR_NO_PENDING_SESSIONS. - */ - ResultVal<std::shared_ptr<ServerSession>> Accept(); - - /// Whether or not this server port has an HLE handler available. - bool HasHLEHandler() const { - return hle_handler != nullptr; - } - - /// Gets the HLE handler for this port. - HLEHandler GetHLEHandler() const { - return hle_handler; - } - - /** - * Sets the HLE handler template for the port. ServerSessions crated by connecting to this port - * will inherit a reference to this handler. - */ - void SetHleHandler(HLEHandler hle_handler_) { - hle_handler = std::move(hle_handler_); - } - - /// Appends a ServerSession to the collection of ServerSessions - /// waiting to be accepted by this port. - void AppendPendingSession(std::shared_ptr<ServerSession> pending_session); - - bool IsSignaled() const override; - - void Finalize() override {} - -private: - /// ServerSessions waiting to be accepted by the port - std::vector<std::shared_ptr<ServerSession>> pending_sessions; - - /// This session's HLE request handler template (optional) - /// ServerSessions created from this port inherit a reference to this handler. - HLEHandler hle_handler; - - /// Name of the port (optional) - std::string name; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index ee46f3e21..04be8a502 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -13,8 +13,8 @@ #include "common/scope_exit.h" #include "common/thread.h" #include "core/core.h" +#include "core/hle/kernel/k_session.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/server_session.h" #include "core/hle/kernel/service_thread.h" #include "core/hle/lock.h" #include "video_core/renderer_base.h" @@ -26,7 +26,7 @@ public: explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name); ~Impl(); - void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); + void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); private: std::vector<std::thread> threads; @@ -69,18 +69,27 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std }); } -void ServiceThread::Impl::QueueSyncRequest(ServerSession& session, +void ServiceThread::Impl::QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context) { { std::unique_lock lock{queue_mutex}; - // ServerSession owns the service thread, so we cannot caption a strong pointer here in the - // event that the ServerSession is terminated. 
- std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)}; - requests.emplace([weak_ptr, context{std::move(context)}]() { - if (auto strong_ptr = weak_ptr.lock()) { - strong_ptr->CompleteSyncRequest(*context); + // Open a reference to the session to ensure it is not closes while the service request + // completes asynchronously. + session.Open(); + + requests.emplace([session_ptr{&session}, context{std::move(context)}]() { + // Close the reference. + SCOPE_EXIT({ session_ptr->Close(); }); + + // If the session has been closed, we are done. + if (session_ptr->IsServerClosed()) { + return; } + + // Complete the service request. + KScopedAutoObject server_session{&session_ptr->GetServerSession()}; + server_session->CompleteSyncRequest(*context); }); } condition.notify_one(); @@ -102,7 +111,7 @@ ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const ServiceThread::~ServiceThread() = default; -void ServiceThread::QueueSyncRequest(ServerSession& session, +void ServiceThread::QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context) { impl->QueueSyncRequest(session, std::move(context)); } diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h index 025ab8fb5..6a7fd7c56 100644 --- a/src/core/hle/kernel/service_thread.h +++ b/src/core/hle/kernel/service_thread.h @@ -11,14 +11,14 @@ namespace Kernel { class HLERequestContext; class KernelCore; -class ServerSession; +class KSession; class ServiceThread final { public: explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name); ~ServiceThread(); - void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context); + void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); private: class Impl; diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp deleted file mode 100644 index 8830d4e91..000000000 --- a/src/core/hle/kernel/session.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include "common/assert.h" -#include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/k_scoped_resource_reservation.h" -#include "core/hle/kernel/server_session.h" -#include "core/hle/kernel/session.h" - -namespace Kernel { - -Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {} -Session::~Session() { - // Release reserved resource when the Session pair was created. - kernel.GetSystemResourceLimit()->Release(LimitableResource::Sessions, 1); -} - -Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { - // Reserve a new session from the resource limit. 
- KScopedResourceReservation session_reservation(kernel.GetSystemResourceLimit(), - LimitableResource::Sessions); - ASSERT(session_reservation.Succeeded()); - auto session{std::make_shared<Session>(kernel)}; - auto client_session{Kernel::ClientSession::Create(kernel, session, name + "_Client").Unwrap()}; - auto server_session{Kernel::ServerSession::Create(kernel, session, name + "_Server").Unwrap()}; - - session->name = std::move(name); - session->client = client_session; - session->server = server_session; - - session_reservation.Commit(); - return std::make_pair(std::move(client_session), std::move(server_session)); -} - -bool Session::IsSignaled() const { - UNIMPLEMENTED(); - return true; -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h deleted file mode 100644 index fa3c5651a..000000000 --- a/src/core/hle/kernel/session.h +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019 yuzu emulator team -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <memory> -#include <string> -#include <utility> - -#include "core/hle/kernel/k_synchronization_object.h" - -namespace Kernel { - -class ClientSession; -class ServerSession; - -/** - * Parent structure to link the client and server endpoints of a session with their associated - * client port. - */ -class Session final : public KSynchronizationObject { -public: - explicit Session(KernelCore& kernel); - ~Session() override; - - using SessionPair = std::pair<std::shared_ptr<ClientSession>, std::shared_ptr<ServerSession>>; - - static SessionPair Create(KernelCore& kernel, std::string name = "Unknown"); - - std::string GetName() const override { - return name; - } - - static constexpr HandleType HANDLE_TYPE = HandleType::Session; - HandleType GetHandleType() const override { - return HANDLE_TYPE; - } - - bool IsSignaled() const override; - - void Finalize() override {} - - std::shared_ptr<ClientSession> Client() { - if (auto result{client.lock()}) { - return result; - } - return {}; - } - - std::shared_ptr<ServerSession> Server() { - if (auto result{server.lock()}) { - return result; - } - return {}; - } - -private: - std::string name; - std::weak_ptr<ClientSession> client; - std::weak_ptr<ServerSession> server; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h new file mode 100644 index 000000000..0c5995db0 --- /dev/null +++ b/src/core/hle/kernel/slab_helpers.h @@ -0,0 +1,148 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <atomic> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_auto_object_container.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_slab_heap.h" +#include "core/hle/kernel/kernel.h" + +namespace Kernel { + +template <class Derived> +class KSlabAllocated { +public: + constexpr KSlabAllocated() = default; + + size_t GetSlabIndex(KernelCore& kernel) const { + return kernel.SlabHeap<Derived>().GetIndex(static_cast<const Derived*>(this)); + } + +public: + static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { + kernel.SlabHeap<Derived>().Initialize(memory, memory_size); + } + + static Derived* Allocate(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().Allocate(); + } + + static void Free(KernelCore& kernel, Derived* obj) { + kernel.SlabHeap<Derived>().Free(obj); + } + + static size_t GetObjectSize(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetObjectSize(); + } + + static size_t GetSlabHeapSize(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetSlabHeapSize(); + } + + static size_t GetPeakIndex(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetPeakIndex(); + } + + static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); + } + + static size_t GetNumRemaining(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetNumRemaining(); + } +}; + +template <typename Derived, typename Base> +class KAutoObjectWithSlabHeapAndContainer : public Base { + static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); + +private: + static Derived* Allocate(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel); + } + + static void Free(KernelCore& kernel, Derived* obj) { + kernel.SlabHeap<Derived>().Free(obj); + } + +public: + KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} + virtual ~KAutoObjectWithSlabHeapAndContainer() {} + + virtual void Destroy() override { + const bool is_initialized = this->IsInitialized(); + uintptr_t arg = 0; + if (is_initialized) { + kernel.ObjectListContainer().Unregister(this); + arg = this->GetPostDestroyArgument(); + this->Finalize(); + } + Free(kernel, static_cast<Derived*>(this)); + if (is_initialized) { + Derived::PostDestroy(arg); + } + } + + virtual bool IsInitialized() const { + return true; + } + virtual uintptr_t GetPostDestroyArgument() const { + return 0; + } + + size_t GetSlabIndex() const { + return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); + } + +public: + static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { + kernel.SlabHeap<Derived>().Initialize(memory, memory_size); + kernel.ObjectListContainer().Initialize(); + } + + static Derived* Create(KernelCore& kernel) { + Derived* obj = Allocate(kernel); + if (obj != nullptr) { + KAutoObject::Create(obj); + } + return obj; + } + + static void Register(KernelCore& kernel, Derived* obj) { + return kernel.ObjectListContainer().Register(obj); + } + + static size_t GetObjectSize(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetObjectSize(); + } + + static size_t GetSlabHeapSize(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetSlabHeapSize(); + } + + static size_t GetPeakIndex(KernelCore& kernel) { + return 
kernel.SlabHeap<Derived>().GetPeakIndex(); + } + + static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); + } + + static size_t GetNumRemaining(KernelCore& kernel) { + return kernel.SlabHeap<Derived>().GetNumRemaining(); + } + +protected: + KernelCore& kernel; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index bebb86154..52011be9c 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -21,15 +21,16 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/cpu_manager.h" -#include "core/hle/kernel/client_port.h" -#include "core/hle/kernel/client_session.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_address_arbiter.h" +#include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_client_session.h" #include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_block.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_readable_event.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" @@ -38,16 +39,15 @@ #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_transfer_memory.h" #include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" -#include "core/hle/kernel/process.h" #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_wrap.h" #include "core/hle/kernel/time_manager.h" -#include "core/hle/kernel/transfer_memory.h" #include "core/hle/lock.h" #include "core/hle/result.h" #include "core/hle/service/service.h" @@ -113,7 +113,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, LOG_ERROR(Kernel_SVC, "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } if (manager.IsInsideHeapRegion(dst_addr, size)) { @@ -121,7 +121,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, "Destination does not fit within the heap region, addr=0x{:016X}, " "size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } if (manager.IsInsideAliasRegion(dst_addr, size)) { @@ -129,7 +129,7 @@ ResultCode MapUnmapMemorySanityChecks(const KPageTable& manager, VAddr dst_addr, "Destination does not fit within the map region, addr=0x{:016X}, " "size=0x{:016X}", dst_addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } return RESULT_SUCCESS; @@ -141,38 +141,6 @@ enum class ResourceLimitValueType { PeakValue, }; -ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit, - u32 resource_type, ResourceLimitValueType value_type) { - std::lock_guard lock{HLE::g_hle_lock}; - const auto type = static_cast<LimitableResource>(resource_type); - if (!IsValidResourceType(type)) { - LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); - return ResultInvalidEnumValue; - } - - const auto* const current_process = 
system.Kernel().CurrentProcess(); - ASSERT(current_process != nullptr); - - const auto resource_limit_object = - current_process->GetHandleTable().Get<KResourceLimit>(resource_limit); - if (!resource_limit_object) { - LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}", - resource_limit); - return ResultInvalidHandle; - } - - switch (value_type) { - case ResourceLimitValueType::CurrentValue: - return MakeResult(resource_limit_object->GetCurrentValue(type)); - case ResourceLimitValueType::LimitValue: - return MakeResult(resource_limit_object->GetLimitValue(type)); - case ResourceLimitValueType::PeakValue: - return MakeResult(resource_limit_object->GetPeakValue(type)); - default: - LOG_ERROR(Kernel_SVC, "Invalid resource value_type: '{}'", value_type); - return ResultInvalidEnumValue; - } -} } // Anonymous namespace /// Set the process heap to a given Size. It can both extend and shrink the heap. @@ -291,11 +259,8 @@ static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr } /// Connect to an OS service given the port name, returns the handle to the port to out -static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, - VAddr port_name_address) { - std::lock_guard lock{HLE::g_hle_lock}; +static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) { auto& memory = system.Memory(); - if (!memory.IsValidVirtualAddress(port_name_address)) { LOG_ERROR(Kernel_SVC, "Port Name Address is not a valid virtual address, port_name_address=0x{:016X}", @@ -314,21 +279,33 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, LOG_TRACE(Kernel_SVC, "called port_name={}", port_name); + // Get the current handle table. auto& kernel = system.Kernel(); + auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); + + // Find the client port. const auto it = kernel.FindNamedPort(port_name); if (!kernel.IsValidNamedPort(it)) { LOG_WARNING(Kernel_SVC, "tried to connect to unknown port: {}", port_name); return ResultNotFound; } + auto port = it->second; - auto client_port = it->second; + // Reserve a handle for the port. + // NOTE: Nintendo really does write directly to the output handle here. + R_TRY(handle_table.Reserve(out)); + auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); }); - std::shared_ptr<ClientSession> client_session; - CASCADE_RESULT(client_session, client_port->Connect()); + // Create a session. + KClientSession* session{}; + R_TRY(port->CreateSession(std::addressof(session))); - // Return the client session - auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); - CASCADE_RESULT(*out_handle, handle_table.Create(client_session)); + // Register the session in the table, close the extra reference. + handle_table.Register(*out, session); + session->Close(); + + // We succeeded. + handle_guard.Cancel(); return RESULT_SUCCESS; } @@ -340,14 +317,12 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle, /// Makes a blocking IPC call to an OS service. 
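The rewritten ConnectToNamedPort above reserves the output handle slot before the session exists, registers the object only once creation has succeeded, and rolls the reservation back on any failure path. A minimal sketch of that idiom, using only the calls visible in this diff (KHandleTable::Reserve/Unreserve/Register, KClientPort::CreateSession, SCOPE_GUARD); the wrapper function and its parameters are illustrative, not part of the change:

    // Sketch of the reserve -> create -> register -> close idiom used above.
    static ResultCode ConnectSketch(KHandleTable& handle_table, KClientPort* port, Handle* out) {
        // Reserve a slot up front; undo the reservation if anything below fails.
        R_TRY(handle_table.Reserve(out));
        auto handle_guard = SCOPE_GUARD({ handle_table.Unreserve(*out); });

        // Create the session against the port.
        KClientSession* session{};
        R_TRY(port->CreateSession(std::addressof(session)));

        // Register the session in the reserved slot, then drop the creation reference;
        // the handle table keeps its own reference to the session.
        handle_table.Register(*out, session);
        session->Close();

        // Success: keep the handle by cancelling the rollback guard.
        handle_guard.Cancel();
        return RESULT_SUCCESS;
    }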
static ResultCode SendSyncRequest(Core::System& system, Handle handle) { + auto& kernel = system.Kernel(); - const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); - std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); - if (!session) { - LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); - return ResultInvalidHandle; - } + KScopedAutoObject session = + kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle); + R_UNLESS(session.IsNotNull(), ResultInvalidHandle); LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); auto thread = kernel.CurrentScheduler()->GetCurrentThread(); @@ -355,7 +330,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { KScopedSchedulerLock lock(kernel); thread->SetState(ThreadState::Waiting); thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); - session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); + session->SendSyncRequest(thread, system.Memory(), system.CoreTiming()); } KSynchronizationObject* dummy{}; @@ -368,18 +343,13 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { /// Get the ID for the specified thread. static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { - LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); - // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Get the thread's id. - *out_thread_id = thread->GetThreadID(); + *out_thread_id = thread->GetId(); return RESULT_SUCCESS; } @@ -395,110 +365,101 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low, } /// Gets the ID of the specified process or a specified thread's owning process. -static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) { +static ResultCode GetProcessId(Core::System& system, u64* out_process_id, Handle handle) { LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<Process> process = handle_table.Get<Process>(handle); - if (process) { - *process_id = process->GetProcessID(); - return RESULT_SUCCESS; + // Get the object from the handle table. + KScopedAutoObject obj = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KAutoObject>( + static_cast<Handle>(handle)); + R_UNLESS(obj.IsNotNull(), ResultInvalidHandle); + + // Get the process from the object. + KProcess* process = nullptr; + if (KProcess* p = obj->DynamicCast<KProcess*>(); p != nullptr) { + // The object is a process, so we can use it directly. + process = p; + } else if (KThread* t = obj->DynamicCast<KThread*>(); t != nullptr) { + // The object is a thread, so we want to use its parent. + process = reinterpret_cast<KThread*>(obj.GetPointerUnsafe())->GetOwnerProcess(); + } else { + // TODO(bunnei): This should also handle debug objects before returning. 
+ UNIMPLEMENTED_MSG("Debug objects not implemented"); } - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); - if (thread) { - const Process* const owner_process = thread->GetOwnerProcess(); - if (!owner_process) { - LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered."); - return ResultInvalidHandle; - } - - *process_id = owner_process->GetProcessID(); - return RESULT_SUCCESS; - } + // Make sure the target process exists. + R_UNLESS(process != nullptr, ResultInvalidHandle); - // NOTE: This should also handle debug objects before returning. + // Get the process id. + *out_process_id = process->GetId(); - LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle); return ResultInvalidHandle; } -static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high, - Handle handle) { - u64 process_id{}; - const auto result = GetProcessId(system, &process_id, handle); - *process_id_low = static_cast<u32>(process_id); - *process_id_high = static_cast<u32>(process_id >> 32); +static ResultCode GetProcessId32(Core::System& system, u32* out_process_id_low, + u32* out_process_id_high, Handle handle) { + u64 out_process_id{}; + const auto result = GetProcessId(system, &out_process_id, handle); + *out_process_id_low = static_cast<u32>(out_process_id); + *out_process_id_high = static_cast<u32>(out_process_id >> 32); return result; } /// Wait for the given handles to synchronize, timeout after the specified nanoseconds static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address, - u64 handle_count, s64 nano_seconds) { - LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}", - handles_address, handle_count, nano_seconds); + u64 num_handles, s64 nano_seconds) { + LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, num_handles={}, nano_seconds={}", + handles_address, num_handles, nano_seconds); - auto& memory = system.Memory(); - if (!memory.IsValidVirtualAddress(handles_address)) { - LOG_ERROR(Kernel_SVC, - "Handle address is not a valid virtual address, handle_address=0x{:016X}", - handles_address); - return ResultInvalidPointer; - } - - static constexpr u64 MaxHandles = 0x40; - - if (handle_count > MaxHandles) { - LOG_ERROR(Kernel_SVC, "Handle count specified is too large, expected {} but got {}", - MaxHandles, handle_count); - return ResultOutOfRange; - } + // Ensure number of handles is valid. + R_UNLESS(0 <= num_handles && num_handles <= ArgumentHandleCountMax, ResultOutOfRange); auto& kernel = system.Kernel(); - std::vector<KSynchronizationObject*> objects(handle_count); + std::vector<KSynchronizationObject*> objs(num_handles); const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); + Handle* handles = system.Memory().GetPointer<Handle>(handles_address); - for (u64 i = 0; i < handle_count; ++i) { - const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); - const auto object = handle_table.Get<KSynchronizationObject>(handle); + // Copy user handles. + if (num_handles > 0) { + // Convert the handles to objects. + R_UNLESS(handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, + num_handles), + ResultInvalidHandle); + } - if (object == nullptr) { - LOG_ERROR(Kernel_SVC, "Object is a nullptr"); - return ResultInvalidHandle; + // Ensure handles are closed when we're done. 
+ SCOPE_EXIT({ + for (u64 i = 0; i < num_handles; ++i) { + objs[i]->Close(); } + }); - objects[i] = object.get(); - } - return KSynchronizationObject::Wait(kernel, index, objects.data(), - static_cast<s32>(objects.size()), nano_seconds); + return KSynchronizationObject::Wait(kernel, index, objs.data(), static_cast<s32>(objs.size()), + nano_seconds); } static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, - s32 handle_count, u32 timeout_high, s32* index) { + s32 num_handles, u32 timeout_high, s32* index) { const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)}; - return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); + return WaitSynchronization(system, index, handles_address, num_handles, nano_seconds); } /// Resumes a thread waiting on WaitSynchronization -static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { - LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); +static ResultCode CancelSynchronization(Core::System& system, Handle handle) { + LOG_TRACE(Kernel_SVC, "called handle=0x{:X}", handle); // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>( + static_cast<Handle>(handle)); // Cancel the thread's wait. thread->WaitCancel(); return RESULT_SUCCESS; } -static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) { - return CancelSynchronization(system, thread_handle); +static ResultCode CancelSynchronization32(Core::System& system, Handle handle) { + return CancelSynchronization(system, handle); } /// Attempts to locks a mutex @@ -678,7 +639,7 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) { } /// Gets system/memory information for the current process -static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, +static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id) { std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, @@ -744,10 +705,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return ResultInvalidEnumValue; } - const auto& current_process_handle_table = - system.Kernel().CurrentProcess()->GetHandleTable(); - const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle)); - if (!process) { + const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); + KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); + if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Process is not valid! 
info_id={}, info_sub_id={}, handle={:08X}", info_id, info_sub_id, handle); return ResultInvalidHandle; @@ -851,21 +811,19 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return ResultInvalidCombination; } - Process* const current_process = system.Kernel().CurrentProcess(); - HandleTable& handle_table = current_process->GetHandleTable(); + KProcess* const current_process = system.Kernel().CurrentProcess(); + KHandleTable& handle_table = current_process->GetHandleTable(); const auto resource_limit = current_process->GetResourceLimit(); if (!resource_limit) { - *result = KernelHandle::InvalidHandle; + *result = Svc::InvalidHandle; // Yes, the kernel considers this a successful operation. return RESULT_SUCCESS; } - const auto table_result = handle_table.Create(resource_limit); - if (table_result.Failed()) { - return table_result.Code(); - } + Handle handle{}; + R_TRY(handle_table.Add(&handle, resource_limit)); - *result = *table_result; + *result = handle; return RESULT_SUCCESS; } @@ -876,9 +834,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return ResultInvalidHandle; } - if (info_sub_id >= Process::RANDOM_ENTROPY_SIZE) { + if (info_sub_id >= KProcess::RANDOM_ENTROPY_SIZE) { LOG_ERROR(Kernel_SVC, "Entropy size is out of range, expected {} but got {}", - Process::RANDOM_ENTROPY_SIZE, info_sub_id); + KProcess::RANDOM_ENTROPY_SIZE, info_sub_id); return ResultInvalidCombination; } @@ -899,9 +857,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return ResultInvalidCombination; } - const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>( - static_cast<Handle>(handle)); - if (!thread) { + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>( + static_cast<Handle>(handle)); + if (thread.IsNull()) { LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", static_cast<Handle>(handle)); return ResultInvalidHandle; @@ -910,7 +869,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha const auto& core_timing = system.CoreTiming(); const auto& scheduler = *system.Kernel().CurrentScheduler(); const auto* const current_thread = scheduler.GetCurrentThread(); - const bool same_thread = current_thread == thread.get(); + const bool same_thread = current_thread == thread.GetPointerUnsafe(); const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); u64 out_ticks = 0; @@ -966,10 +925,10 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) if (!(addr < addr + size)) { LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } - Process* const current_process{system.Kernel().CurrentProcess()}; + KProcess* const current_process{system.Kernel().CurrentProcess()}; auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { @@ -981,14 +940,14 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) LOG_ERROR(Kernel_SVC, "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } if (page_table.IsOutsideAliasRegion(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } return 
page_table.MapPhysicalMemory(addr, size); @@ -1020,10 +979,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size if (!(addr < addr + size)) { LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } - Process* const current_process{system.Kernel().CurrentProcess()}; + KProcess* const current_process{system.Kernel().CurrentProcess()}; auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { @@ -1035,14 +994,14 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size LOG_ERROR(Kernel_SVC, "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } if (page_table.IsOutsideAliasRegion(addr, size)) { LOG_ERROR(Kernel_SVC, "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } return page_table.UnmapPhysicalMemory(addr, size); @@ -1062,37 +1021,19 @@ static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle, constexpr auto IsValidThreadActivity = [](ThreadActivity activity) { return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused; }; - if (!IsValidThreadActivity(thread_activity)) { - LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})", - thread_activity); - return ResultInvalidEnumValue; - } + R_UNLESS(IsValidThreadActivity(thread_activity), ResultInvalidEnumValue); // Get the thread from its handle. - auto& kernel = system.Kernel(); - const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Check that the activity is being set on a non-current thread for the current process. - if (thread->GetOwnerProcess() != kernel.CurrentProcess()) { - LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread."); - return ResultInvalidHandle; - } - if (thread.get() == GetCurrentThreadPointer(kernel)) { - LOG_ERROR(Kernel_SVC, "Thread is busy"); - return ResultBusy; - } + R_UNLESS(thread->GetOwnerProcess() == system.Kernel().CurrentProcess(), ResultInvalidHandle); + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(system.Kernel()), ResultBusy); // Set the activity. - const auto set_result = thread->SetActivity(thread_activity); - if (set_result.IsError()) { - LOG_ERROR(Kernel_SVC, "Failed to set thread activity."); - return set_result; - } + R_TRY(thread->SetActivity(thread_activity)); return RESULT_SUCCESS; } @@ -1107,36 +1048,55 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context, thread_handle); + auto& kernel = system.Kernel(); + // Get the thread from its handle. 
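SetThreadActivity above shows the validation shape that most handlers in this change now share: R_UNLESS returns the given ResultCode when its condition fails, R_TRY propagates a failing ResultCode from a callee, and the handle lookup goes through a KScopedAutoObject that closes its reference automatically. A condensed sketch of the same shape, assuming only the APIs that appear in this diff; the function name is illustrative:

    // Sketch: validate -> look up -> act, with early returns via R_UNLESS/R_TRY.
    static ResultCode SetActivitySketch(KernelCore& kernel, Handle thread_handle,
                                        ThreadActivity activity) {
        // Reject anything but the two accepted activity values.
        R_UNLESS(activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused,
                 ResultInvalidEnumValue);

        // Look up the thread; the scoped object closes the reference when it leaves scope.
        KScopedAutoObject thread =
            kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle);
        R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);

        // Only a non-current thread owned by the current process may be targeted.
        R_UNLESS(thread->GetOwnerProcess() == kernel.CurrentProcess(), ResultInvalidHandle);
        R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultBusy);

        // Propagate the result of the actual operation.
        R_TRY(thread->SetActivity(activity));
        return RESULT_SUCCESS;
    }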
- const auto* current_process = system.Kernel().CurrentProcess(); - const std::shared_ptr<KThread> thread = - current_process->GetHandleTable().Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + kernel.CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Require the handle be to a non-current thread in the current process. - if (thread->GetOwnerProcess() != current_process) { - LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process."); - return ResultInvalidHandle; - } - if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { - LOG_ERROR(Kernel_SVC, "Current thread is busy."); - return ResultBusy; - } + const auto* current_process = kernel.CurrentProcess(); + R_UNLESS(current_process == thread->GetOwnerProcess(), ResultInvalidId); - // Get the thread context. - std::vector<u8> context; - const auto context_result = thread->GetThreadContext3(context); - if (context_result.IsError()) { - LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})", - context_result.raw); - return context_result; - } + // Verify that the thread isn't terminated. + R_UNLESS(thread->GetState() != ThreadState::Terminated, ResultTerminationRequested); + + /// Check that the thread is not the current one. + /// NOTE: Nintendo does not check this, and thus the following loop will deadlock. + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(kernel), ResultInvalidId); + + // Try to get the thread context until the thread isn't current on any core. + while (true) { + KScopedSchedulerLock sl{kernel}; - // Copy the thread context to user space. - system.Memory().WriteBlock(out_context, context.data(), context.size()); + // TODO(bunnei): Enforce that thread is suspended for debug here. + + // If the thread's raw state isn't runnable, check if it's current on some core. + if (thread->GetRawState() != ThreadState::Runnable) { + bool current = false; + for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { + if (thread.GetPointerUnsafe() == kernel.Scheduler(i).GetCurrentThread()) { + current = true; + } + break; + } + + // If the thread is current, retry until it isn't. + if (current) { + continue; + } + } + + // Get the thread context. + std::vector<u8> context; + R_TRY(thread->GetThreadContext3(context)); + + // Copy the thread context to user space. + system.Memory().WriteBlock(out_context, context.data(), context.size()); + + return RESULT_SUCCESS; + } return RESULT_SUCCESS; } @@ -1150,12 +1110,9 @@ static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Han LOG_TRACE(Kernel_SVC, "called"); // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Get the thread's priority. 
*out_priority = thread->GetPriority(); @@ -1167,30 +1124,26 @@ static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, H } /// Sets the priority for the specified thread -static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) { - LOG_TRACE(Kernel_SVC, "called"); +static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) { + // Get the current process. + KProcess& process = *system.Kernel().CurrentProcess(); // Validate the priority. - if (HighestThreadPriority > priority || priority > LowestThreadPriority) { - LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority); - return ResultInvalidPriority; - } + R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority, + ResultInvalidPriority); + R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority); // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Set the thread priority. thread->SetBasePriority(priority); return RESULT_SUCCESS; } -static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) { - return SetThreadPriority(system, handle, priority); +static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) { + return SetThreadPriority(system, thread_handle, priority); } /// Get which CPU core is executing the current thread @@ -1203,82 +1156,97 @@ static u32 GetCurrentProcessorNumber32(Core::System& system) { return GetCurrentProcessorNumber(system); } -static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, - u64 size, u32 permissions) { - std::lock_guard lock{HLE::g_hle_lock}; +constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) { + switch (perm) { + case Svc::MemoryPermission::Read: + case Svc::MemoryPermission::ReadWrite: + return true; + default: + return false; + } +} + +constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) { + return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare; +} + +static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, + u64 size, Svc::MemoryPermission map_perm) { LOG_TRACE(Kernel_SVC, "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", - shared_memory_handle, addr, size, permissions); + shmem_handle, address, size, map_perm); - if (!Common::Is4KBAligned(addr)) { - LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr); - return ResultInvalidAddress; - } + // Validate the address/size. + R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((address < address + size), ResultInvalidCurrentMemory); - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ResultInvalidSize; - } + // Validate the permission. 
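The MapSharedMemory rewrite above front-loads the address and size checks that UnmapSharedMemory and CreateTransferMemory later in this diff repeat verbatim. A small sketch of just those checks, using the Common::IsAligned helper and result codes shown above; the helper function is illustrative:

    // Sketch: the shared address/size validation used by the rewritten memory SVCs.
    static ResultCode ValidateRegionSketch(VAddr address, u64 size) {
        R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); // page-aligned start
        R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);       // page-aligned size
        R_UNLESS(size > 0, ResultInvalidSize);                                // non-empty region
        R_UNLESS(address < address + size, ResultInvalidCurrentMemory);       // no u64 wraparound
        return RESULT_SUCCESS;
    }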
+ R_UNLESS(IsValidSharedMemoryPermission(map_perm), ResultInvalidNewMemoryPermission); - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size); - return ResultInvalidSize; - } + // Get the current process. + auto& process = *system.Kernel().CurrentProcess(); + auto& page_table = process.PageTable(); - if (!IsValidAddressRange(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}", - addr, size); - return ResultInvalidCurrentMemory; - } + // Get the shared memory. + KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle); + R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle); - const auto permission_type = static_cast<MemoryPermission>(permissions); - if ((permission_type | MemoryPermission::Write) != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}", - permissions); - return ResultInvalidMemoryPermissions; - } + // Verify that the mapping is in range. + R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion); - auto* const current_process{system.Kernel().CurrentProcess()}; - auto& page_table{current_process->PageTable()}; + // Add the shared memory to the process. + R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size)); - if (page_table.IsInvalidRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, - "Addr does not fit within the valid region, addr=0x{:016X}, " - "size=0x{:016X}", - addr, size); - return ResultInvalidMemoryRange; - } + // Ensure that we clean up the shared memory if we fail to map it. + auto guard = + SCOPE_GUARD({ process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); }); - if (page_table.IsInsideHeapRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, - "Addr does not fit within the heap region, addr=0x{:016X}, " - "size=0x{:016X}", - addr, size); - return ResultInvalidMemoryRange; - } + // Map the shared memory. + R_TRY(shmem->Map(process, address, size, map_perm)); - if (page_table.IsInsideAliasRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, - "Address does not fit within the map region, addr=0x{:016X}, " - "size=0x{:016X}", - addr, size); - return ResultInvalidMemoryRange; - } + // We succeeded. + guard.Cancel(); + return RESULT_SUCCESS; +} - auto shared_memory{current_process->GetHandleTable().Get<KSharedMemory>(shared_memory_handle)}; - if (!shared_memory) { - LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", - shared_memory_handle); - return ResultInvalidHandle; - } +static ResultCode MapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, + u32 size, Svc::MemoryPermission map_perm) { + return MapSharedMemory(system, shmem_handle, address, size, map_perm); +} + +static ResultCode UnmapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, + u64 size) { + // Validate the address/size. + R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((address < address + size), ResultInvalidCurrentMemory); + + // Get the current process. + auto& process = *system.Kernel().CurrentProcess(); + auto& page_table = process.PageTable(); + + // Get the shared memory. 
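The map path above pairs the process bookkeeping call with a rollback guard, so a failed KSharedMemory::Map leaves the process state untouched. A sketch of that pairing, assuming the KProcess/KSharedMemory methods named in this diff; the wrapper function is illustrative:

    // Sketch: record the mapping with the process, roll it back automatically if Map fails.
    static ResultCode MapShmemSketch(KProcess& process, KSharedMemory* shmem, VAddr address,
                                     u64 size, Svc::MemoryPermission map_perm) {
        R_TRY(process.AddSharedMemory(shmem, address, size));
        auto guard = SCOPE_GUARD({ process.RemoveSharedMemory(shmem, address, size); });

        R_TRY(shmem->Map(process, address, size, map_perm));

        // Success: the mapping stays recorded.
        guard.Cancel();
        return RESULT_SUCCESS;
    }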
+ KScopedAutoObject shmem = process.GetHandleTable().GetObject<KSharedMemory>(shmem_handle); + R_UNLESS(shmem.IsNotNull(), ResultInvalidHandle); - return shared_memory->Map(*current_process, addr, size, - static_cast<KMemoryPermission>(permission_type)); + // Verify that the mapping is in range. + R_UNLESS(page_table.CanContain(address, size, KMemoryState::Shared), ResultInvalidMemoryRegion); + + // Unmap the shared memory. + R_TRY(shmem->Unmap(process, address, size)); + + // Remove the shared memory from the process. + process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); + + return RESULT_SUCCESS; } -static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr, - u32 size, u32 permissions) { - return MapSharedMemory(system, shared_memory_handle, addr, size, permissions); +static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle, u32 address, + u32 size) { + return UnmapSharedMemory(system, shmem_handle, address, size); } static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, @@ -1287,8 +1255,8 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); - if (!process) { + KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); + if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", process_handle); return ResultInvalidHandle; @@ -1369,8 +1337,8 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand } const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - auto process = handle_table.Get<Process>(process_handle); - if (!process) { + KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); + if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", process_handle); return ResultInvalidHandle; @@ -1390,7 +1358,7 @@ static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_hand "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } return page_table.MapProcessCodeMemory(dst_address, src_address, size); @@ -1437,8 +1405,8 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha } const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - auto process = handle_table.Get<Process>(process_handle); - if (!process) { + KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); + if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", process_handle); return ResultInvalidHandle; @@ -1458,7 +1426,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " "size=0x{:016X}).", dst_address, size); - return ResultInvalidMemoryRange; + return ResultInvalidMemoryRegion; } return page_table.UnmapProcessCodeMemory(dst_address, src_address, size); @@ -1483,7 +1451,7 @@ static void ExitProcess32(Core::System& system) { ExitProcess(system); } 
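From here on, every handler that used to fetch a std::shared_ptr<Process> via Get<Process>() now fetches a KScopedAutoObject through GetObject<KProcess>(). A minimal sketch of that lookup shape, using only calls shown in this diff; the function name is illustrative:

    // Sketch: scoped handle-table lookup. The wrapper holds an open reference for the lifetime
    // of the local and closes it when the local goes out of scope.
    static ResultCode GetIdSketch(Core::System& system, u64* out, Handle process_handle) {
        const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();

        KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle);
        R_UNLESS(process.IsNotNull(), ResultInvalidHandle);

        // Dereferences like a pointer; GetPointerUnsafe() is reserved for the few identity
        // comparisons seen in the handlers above.
        *out = process->GetId();
        return RESULT_SUCCESS;
    }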
-static constexpr bool IsValidCoreId(int32_t core_id) { +static constexpr bool IsValidVirtualCoreId(int32_t core_id) { return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); } @@ -1503,7 +1471,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e } // Validate arguments. - if (!IsValidCoreId(core_id)) { + if (!IsValidVirtualCoreId(core_id)) { LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); return ResultInvalidCoreId; } @@ -1521,35 +1489,42 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e return ResultInvalidPriority; } + // Reserve a new thread from the process resource limit (waiting up to 100ms). KScopedResourceReservation thread_reservation( kernel.CurrentProcess(), LimitableResource::Threads, 1, system.CoreTiming().GetGlobalTimeNs().count() + 100000000); if (!thread_reservation.Succeeded()) { LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); - return ResultResourceLimitedExceeded; + return ResultLimitReached; } - std::shared_ptr<KThread> thread; - { - KScopedLightLock lk{process.GetStateLock()}; - CASCADE_RESULT(thread, - KThread::CreateUserThread(system, ThreadType::User, "", entry_point, - priority, arg, core_id, stack_bottom, &process)); + // Create the thread. + KThread* thread = KThread::Create(kernel); + if (!thread) { + LOG_ERROR(Kernel_SVC, "Unable to create new threads. Thread creation limit reached."); + return ResultOutOfResource; } + SCOPE_EXIT({ thread->Close(); }); - const auto new_thread_handle = process.GetHandleTable().Create(thread); - if (new_thread_handle.Failed()) { - LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}", - new_thread_handle.Code().raw); - return new_thread_handle.Code(); + // Initialize the thread. + { + KScopedLightLock lk{process.GetStateLock()}; + R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom, + priority, core_id, &process)); } - *out_handle = *new_thread_handle; // Set the thread name for debugging purposes. - thread->SetName( - fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); + thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle)); + + // Commit the thread reservation. thread_reservation.Commit(); + // Register the new thread. + KThread::Register(kernel, thread); + + // Add the thread to the handle table. + R_TRY(process.GetHandleTable().Add(out_handle, thread)); + return RESULT_SUCCESS; } @@ -1563,21 +1538,15 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) { LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Try to start the thread. - const auto run_result = thread->Run(); - if (run_result.IsError()) { - LOG_ERROR(Kernel_SVC, - "Unable to successfuly start thread (thread handle={:08X}, result={})", - thread_handle, run_result.raw); - return run_result; - } + R_TRY(thread->Run()); + + // If we succeeded, persist a reference to the thread. 
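CreateThread above is the clearest example of the object lifecycle that slab_helpers.h (added earlier in this diff) standardizes: Create() allocates from the per-type slab heap, Register() records the object with the kernel, each handle added to a table holds its own reference, and the creator always drops its initial reference via SCOPE_EXIT. A condensed sketch of that lifecycle using the KThread calls shown above, omitting the resource-limit reservation step; the wrapper function is illustrative:

    // Sketch: create -> initialize -> register -> add-handle, with the creation reference always
    // dropped on exit. The handle table and kernel object list keep their own references.
    static ResultCode CreateThreadSketch(Core::System& system, KProcess& process,
                                         Handle* out_handle, VAddr entry_point, u64 arg,
                                         VAddr stack_bottom, u32 priority, s32 core_id) {
        auto& kernel = system.Kernel();

        KThread* thread = KThread::Create(kernel);                 // slab allocation
        R_UNLESS(thread != nullptr, ResultOutOfResource);
        SCOPE_EXIT({ thread->Close(); });                          // drop the creation reference

        {
            KScopedLightLock lk{process.GetStateLock()};
            R_TRY(KThread::InitializeUserThread(system, thread, entry_point, arg, stack_bottom,
                                                priority, core_id, &process));
        }

        KThread::Register(kernel, thread);                         // kernel object list
        R_TRY(process.GetHandleTable().Add(out_handle, thread));   // handle table reference
        return RESULT_SUCCESS;
    }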
+ thread->Open(); return RESULT_SUCCESS; } @@ -1591,7 +1560,7 @@ static void ExitThread(Core::System& system) { LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); - system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread)); + system.GlobalSchedulerContext().RemoveThread(current_thread); current_thread->Exit(); } @@ -1824,8 +1793,11 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) static ResultCode CloseHandle(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); - auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - return handle_table.Close(handle); + // Remove the handle. + R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle), + ResultInvalidHandle); + + return RESULT_SUCCESS; } static ResultCode CloseHandle32(Core::System& system, Handle handle) { @@ -1841,16 +1813,16 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) { // Try to reset as readable event. { - auto readable_event = handle_table.Get<KReadableEvent>(handle); - if (readable_event) { + KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle); + if (readable_event.IsNotNull()) { return readable_event->Reset(); } } // Try to reset as process. { - auto process = handle_table.Get<Process>(handle); - if (process) { + KScopedAutoObject process = handle_table.GetObject<KProcess>(handle); + if (process.IsNotNull()) { return process->Reset(); } } @@ -1864,65 +1836,68 @@ static ResultCode ResetSignal32(Core::System& system, Handle handle) { return ResetSignal(system, handle); } -/// Creates a TransferMemory object -static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, - u32 permissions) { - std::lock_guard lock{HLE::g_hle_lock}; - LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, - permissions); - - if (!Common::Is4KBAligned(addr)) { - LOG_ERROR(Kernel_SVC, "Address ({:016X}) is not page aligned!", addr); - return ResultInvalidAddress; +static constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) { + switch (perm) { + case MemoryPermission::None: + case MemoryPermission::Read: + case MemoryPermission::ReadWrite: + return true; + default: + return false; } +} - if (!Common::Is4KBAligned(size) || size == 0) { - LOG_ERROR(Kernel_SVC, "Size ({:016X}) is not page aligned or equal to zero!", size); - return ResultInvalidAddress; - } +/// Creates a TransferMemory object +static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size, + MemoryPermission map_perm) { + auto& kernel = system.Kernel(); - if (!IsValidAddressRange(addr, size)) { - LOG_ERROR(Kernel_SVC, "Address and size cause overflow! (address={:016X}, size={:016X})", - addr, size); - return ResultInvalidCurrentMemory; - } + // Validate the size. + R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((address < address + size), ResultInvalidCurrentMemory); - const auto perms{static_cast<MemoryPermission>(permissions)}; - if (perms > MemoryPermission::ReadWrite || perms == MemoryPermission::Write) { - LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! 
(perms={:08X})", - permissions); - return ResultInvalidMemoryPermissions; - } + // Validate the permissions. + R_UNLESS(IsValidTransferMemoryPermission(map_perm), ResultInvalidNewMemoryPermission); + + // Get the current process and handle table. + auto& process = *kernel.CurrentProcess(); + auto& handle_table = process.GetHandleTable(); - auto& kernel = system.Kernel(); // Reserve a new transfer memory from the process resource limit. KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), LimitableResource::TransferMemory); - if (!trmem_reservation.Succeeded()) { - LOG_ERROR(Kernel_SVC, "Could not reserve a new transfer memory"); - return ResultResourceLimitedExceeded; - } - auto transfer_mem_handle = TransferMemory::Create(kernel, system.Memory(), addr, size, - static_cast<KMemoryPermission>(perms)); + R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); - if (const auto reserve_result{transfer_mem_handle->Reserve()}; reserve_result.IsError()) { - return reserve_result; - } + // Create the transfer memory. + KTransferMemory* trmem = KTransferMemory::Create(kernel); + R_UNLESS(trmem != nullptr, ResultOutOfResource); - auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); - const auto result{handle_table.Create(std::move(transfer_mem_handle))}; - if (result.Failed()) { - return result.Code(); - } + // Ensure the only reference is in the handle table when we're done. + SCOPE_EXIT({ trmem->Close(); }); + + // Ensure that the region is in range. + R_UNLESS(process.PageTable().Contains(address, size), ResultInvalidCurrentMemory); + + // Initialize the transfer memory. + R_TRY(trmem->Initialize(address, size, map_perm)); + + // Commit the reservation. trmem_reservation.Commit(); - *handle = *result; + // Register the transfer memory. + KTransferMemory::Register(kernel, trmem); + + // Add the transfer memory to the handle table. + R_TRY(handle_table.Add(out, trmem)); + return RESULT_SUCCESS; } -static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size, - u32 permissions) { - return CreateTransferMemory(system, handle, addr, size, permissions); +static ResultCode CreateTransferMemory32(Core::System& system, Handle* out, u32 address, u32 size, + MemoryPermission map_perm) { + return CreateTransferMemory(system, out, address, size, map_perm); } static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id, @@ -1930,19 +1905,12 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Get the core mask. 
- const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask); - if (result.IsError()) { - LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw); - return result; - } + R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask)); return RESULT_SUCCESS; } @@ -1958,58 +1926,33 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, u64 affinity_mask) { - LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}", - thread_handle, core_id, affinity_mask); - - const auto& current_process = *system.Kernel().CurrentProcess(); - // Determine the core id/affinity mask. - if (core_id == Svc::IdealCoreUseProcessValue) { - core_id = current_process.GetIdealCoreId(); + if (core_id == IdealCoreUseProcessValue) { + core_id = system.Kernel().CurrentProcess()->GetIdealCoreId(); affinity_mask = (1ULL << core_id); } else { // Validate the affinity mask. - const u64 process_core_mask = current_process.GetCoreMask(); - if ((affinity_mask | process_core_mask) != process_core_mask) { - LOG_ERROR(Kernel_SVC, - "Affinity mask does match the process core mask (affinity mask={:016X}, core " - "mask={:016X})", - affinity_mask, process_core_mask); - return ResultInvalidCoreId; - } - if (affinity_mask == 0) { - LOG_ERROR(Kernel_SVC, "Affinity mask is zero."); - return ResultInvalidCombination; - } + const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask(); + R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId); + R_UNLESS(affinity_mask != 0, ResultInvalidCombination); // Validate the core id. - if (IsValidCoreId(core_id)) { - if (((1ULL << core_id) & affinity_mask) == 0) { - LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); - return ResultInvalidCombination; - } + if (IsValidVirtualCoreId(core_id)) { + R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination); } else { - if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) { - LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); - return ResultInvalidCoreId; - } + R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare, + ResultInvalidCoreId); } } // Get the thread from its handle. - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); - if (!thread) { - LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle); - return ResultInvalidHandle; - } + KScopedAutoObject thread = + system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); + R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); // Set the core mask. - const auto set_result = thread->SetCoreMask(core_id, affinity_mask); - if (set_result.IsError()) { - LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw); - return set_result; - } + R_TRY(thread->SetCoreMask(core_id, affinity_mask)); + return RESULT_SUCCESS; } @@ -2022,27 +1965,12 @@ static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle static ResultCode SignalEvent(Core::System& system, Handle event_handle) { LOG_DEBUG(Kernel_SVC, "called, event_handle=0x{:08X}", event_handle); - auto& kernel = system.Kernel(); // Get the current handle table. 
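SetThreadCoreMask above resolves the IdealCoreUseProcessValue request up front and then checks that the affinity mask is a non-empty subset of the process core mask containing the chosen core, unless one of the no-update sentinels was passed. A sketch of just that validation, using the constants and accessors referenced above; the helper function is illustrative:

    // Sketch: core id / affinity mask validation as performed by SetThreadCoreMask.
    static ResultCode ValidateCoreMaskSketch(KProcess& process, s32& core_id, u64& affinity_mask) {
        if (core_id == IdealCoreUseProcessValue) {
            // Fall back to the process ideal core and pin the mask to it.
            core_id = process.GetIdealCoreId();
            affinity_mask = 1ULL << core_id;
            return RESULT_SUCCESS;
        }

        // The mask must be a non-empty subset of the cores the process may use.
        const u64 process_core_mask = process.GetCoreMask();
        R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId);
        R_UNLESS(affinity_mask != 0, ResultInvalidCombination);

        if (IsValidVirtualCoreId(core_id)) {
            // A concrete core id must be contained in the mask.
            R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination);
        } else {
            // Otherwise only the two sentinel values are allowed.
            R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare,
                     ResultInvalidCoreId);
        }
        return RESULT_SUCCESS;
    }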
- const HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable(); - - // Reserve a new event from the process resource limit. - KScopedResourceReservation event_reservation(kernel.CurrentProcess(), - LimitableResource::Events); - if (!event_reservation.Succeeded()) { - LOG_ERROR(Kernel, "Could not reserve a new event"); - return ResultResourceLimitedExceeded; - } + const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); // Get the writable event. - auto writable_event = handle_table.Get<KWritableEvent>(event_handle); - if (!writable_event) { - LOG_ERROR(Kernel_SVC, "Invalid event handle provided (handle={:08X})", event_handle); - return ResultInvalidHandle; - } - - // Commit the successfuly reservation. - event_reservation.Commit(); + KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); + R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle); return writable_event->Signal(); } @@ -2059,16 +1987,16 @@ static ResultCode ClearEvent(Core::System& system, Handle event_handle) { // Try to clear the writable event. { - auto writable_event = handle_table.Get<KWritableEvent>(event_handle); - if (writable_event) { + KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); + if (writable_event.IsNotNull()) { return writable_event->Clear(); } } // Try to clear the readable event. { - auto readable_event = handle_table.Get<KReadableEvent>(event_handle); - if (readable_event) { + KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle); + if (readable_event.IsNotNull()) { return readable_event->Clear(); } } @@ -2087,34 +2015,40 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o // Get the kernel reference and handle table. auto& kernel = system.Kernel(); - HandleTable& handle_table = kernel.CurrentProcess()->GetHandleTable(); + auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); + + // Reserve a new event from the process resource limit + KScopedResourceReservation event_reservation(kernel.CurrentProcess(), + LimitableResource::Events); + R_UNLESS(event_reservation.Succeeded(), ResultLimitReached); // Create a new event. - const auto event = KEvent::Create(kernel, "CreateEvent"); - if (!event) { - LOG_ERROR(Kernel_SVC, "Unable to create new events. Event creation limit reached."); - return ResultOutOfResource; - } + KEvent* event = KEvent::Create(kernel); + R_UNLESS(event != nullptr, ResultOutOfResource); // Initialize the event. - event->Initialize(); + event->Initialize("CreateEvent"); + + // Commit the thread reservation. + event_reservation.Commit(); + + // Ensure that we clean up the event (and its only references are handle table) on function end. + SCOPE_EXIT({ + event->GetWritableEvent().Close(); + event->GetReadableEvent().Close(); + }); + + // Register the event. + KEvent::Register(kernel, event); // Add the writable event to the handle table. - const auto write_create_result = handle_table.Create(event->GetWritableEvent()); - if (write_create_result.Failed()) { - return write_create_result.Code(); - } - *out_write = *write_create_result; + R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent()))); // Add the writable event to the handle table. - auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); }); + auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); // Add the readable event to the handle table. 
- const auto read_create_result = handle_table.Create(event->GetReadableEvent()); - if (read_create_result.Failed()) { - return read_create_result.Code(); - } - *out_read = *read_create_result; + R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent()))); // We succeeded. handle_guard.Cancel(); @@ -2134,8 +2068,8 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_ }; const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - const auto process = handle_table.Get<Process>(process_handle); - if (!process) { + KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); + if (process.IsNull()) { LOG_ERROR(Kernel_SVC, "Process handle does not exist, process_handle=0x{:08X}", process_handle); return ResultInvalidHandle; @@ -2152,83 +2086,86 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_ } static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_DEBUG(Kernel_SVC, "called"); + // Create a new resource limit. auto& kernel = system.Kernel(); - auto resource_limit = std::make_shared<KResourceLimit>(kernel, system.CoreTiming()); + KResourceLimit* resource_limit = KResourceLimit::Create(kernel); + R_UNLESS(resource_limit != nullptr, ResultOutOfResource); - auto* const current_process = kernel.CurrentProcess(); - ASSERT(current_process != nullptr); + // Ensure we don't leak a reference to the limit. + SCOPE_EXIT({ resource_limit->Close(); }); - const auto handle = current_process->GetHandleTable().Create(std::move(resource_limit)); - if (handle.Failed()) { - return handle.Code(); - } + // Initialize the resource limit. + resource_limit->Initialize(&system.CoreTiming()); + + // Register the limit. + KResourceLimit::Register(kernel, resource_limit); + + // Add the limit to the handle table. + R_TRY(kernel.CurrentProcess()->GetHandleTable().Add(out_handle, resource_limit)); - *out_handle = *handle; return RESULT_SUCCESS; } -static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_value, - Handle resource_limit, u32 resource_type) { - LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}", resource_limit, resource_type); +static ResultCode GetResourceLimitLimitValue(Core::System& system, u64* out_limit_value, + Handle resource_limit_handle, + LimitableResource which) { + LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle, + which); - const auto limit_value = RetrieveResourceLimitValue(system, resource_limit, resource_type, - ResourceLimitValueType::LimitValue); - if (limit_value.Failed()) { - return limit_value.Code(); - } + // Validate the resource. + R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue); + + // Get the resource limit. + auto& kernel = system.Kernel(); + KScopedAutoObject resource_limit = + kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle); + + // Get the limit value. + *out_limit_value = resource_limit->GetLimitValue(which); - *out_value = static_cast<u64>(*limit_value); return RESULT_SUCCESS; } -static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_value, - Handle resource_limit, u32 resource_type) { - LOG_DEBUG(Kernel_SVC, "called. 
Handle={:08X}, Resource type={}", resource_limit, resource_type); +static ResultCode GetResourceLimitCurrentValue(Core::System& system, u64* out_current_value, + Handle resource_limit_handle, + LimitableResource which) { + LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}", resource_limit_handle, + which); - const auto current_value = RetrieveResourceLimitValue(system, resource_limit, resource_type, - ResourceLimitValueType::CurrentValue); - if (current_value.Failed()) { - return current_value.Code(); - } + // Validate the resource. + R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue); + + // Get the resource limit. + auto& kernel = system.Kernel(); + KScopedAutoObject resource_limit = + kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle); + + // Get the current value. + *out_current_value = resource_limit->GetCurrentValue(which); - *out_value = static_cast<u64>(*current_value); return RESULT_SUCCESS; } -static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit, - u32 resource_type, u64 value) { - LOG_DEBUG(Kernel_SVC, "called. Handle={:08X}, Resource type={}, Value={}", resource_limit, - resource_type, value); +static ResultCode SetResourceLimitLimitValue(Core::System& system, Handle resource_limit_handle, + LimitableResource which, u64 limit_value) { + LOG_DEBUG(Kernel_SVC, "called, resource_limit_handle={:08X}, which={}, limit_value={}", + resource_limit_handle, which, limit_value); - const auto type = static_cast<LimitableResource>(resource_type); - if (!IsValidResourceType(type)) { - LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); - return ResultInvalidEnumValue; - } - - auto* const current_process = system.Kernel().CurrentProcess(); - ASSERT(current_process != nullptr); + // Validate the resource. + R_UNLESS(IsValidResourceType(which), ResultInvalidEnumValue); - auto resource_limit_object = - current_process->GetHandleTable().Get<KResourceLimit>(resource_limit); - if (!resource_limit_object) { - LOG_ERROR(Kernel_SVC, "Handle to non-existent resource limit instance used. Handle={:08X}", - resource_limit); - return ResultInvalidHandle; - } + // Get the resource limit. + auto& kernel = system.Kernel(); + KScopedAutoObject resource_limit = + kernel.CurrentProcess()->GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), ResultInvalidHandle); - const auto set_result = resource_limit_object->SetLimitValue(type, static_cast<s64>(value)); - if (set_result.IsError()) { - LOG_ERROR(Kernel_SVC, - "Attempted to lower resource limit ({}) for category '{}' below its current " - "value ({})", - resource_limit_object->GetLimitValue(type), resource_type, - resource_limit_object->GetCurrentValue(type)); - return set_result; - } + // Set the limit value. 
+ R_TRY(resource_limit->SetLimitValue(which, limit_value)); return RESULT_SUCCESS; } @@ -2351,7 +2288,7 @@ static const FunctionDef SVC_Table_32[] = { {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"}, {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"}, {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"}, - {0x14, nullptr, "UnmapSharedMemory32"}, + {0x14, SvcWrap32<UnmapSharedMemory32>, "UnmapSharedMemory32"}, {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"}, {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"}, @@ -2546,7 +2483,7 @@ static const FunctionDef SVC_Table_64[] = { {0x11, SvcWrap64<SignalEvent>, "SignalEvent"}, {0x12, SvcWrap64<ClearEvent>, "ClearEvent"}, {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"}, - {0x14, nullptr, "UnmapSharedMemory"}, + {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"}, {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"}, {0x16, SvcWrap64<CloseHandle>, "CloseHandle"}, {0x17, SvcWrap64<ResetSignal>, "ResetSignal"}, diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 4af049551..60ea2c405 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h @@ -6,9 +6,24 @@ #include "common/common_types.h" +namespace Kernel { +using Handle = u32; +} + namespace Kernel::Svc { constexpr s32 ArgumentHandleCountMax = 0x40; constexpr u32 HandleWaitMask{1u << 30}; +constexpr inline Handle InvalidHandle = Handle(0); + +enum PseudoHandle : Handle { + CurrentThread = 0xFFFF8000, + CurrentProcess = 0xFFFF8001, +}; + +constexpr bool IsPseudoHandle(Handle handle) { + return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread; +} + } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index a26d9f2c9..53a940723 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h @@ -10,18 +10,18 @@ namespace Kernel { // Confirmed Switch kernel error codes -constexpr ResultCode ResultMaxConnectionsReached{ErrorModule::Kernel, 7}; -constexpr ResultCode ResultInvalidCapabilityDescriptor{ErrorModule::Kernel, 14}; +constexpr ResultCode ResultOutOfSessions{ErrorModule::Kernel, 7}; +constexpr ResultCode ResultInvalidArgument{ErrorModule::Kernel, 14}; constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57}; constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59}; constexpr ResultCode ResultInvalidSize{ErrorModule::Kernel, 101}; constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102}; constexpr ResultCode ResultOutOfResource{ErrorModule::Kernel, 103}; constexpr ResultCode ResultOutOfMemory{ErrorModule::Kernel, 104}; -constexpr ResultCode ResultHandleTableFull{ErrorModule::Kernel, 105}; +constexpr ResultCode ResultOutOfHandles{ErrorModule::Kernel, 105}; constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106}; -constexpr ResultCode ResultInvalidMemoryPermissions{ErrorModule::Kernel, 108}; -constexpr ResultCode ResultInvalidMemoryRange{ErrorModule::Kernel, 110}; +constexpr ResultCode ResultInvalidNewMemoryPermission{ErrorModule::Kernel, 108}; +constexpr ResultCode ResultInvalidMemoryRegion{ErrorModule::Kernel, 110}; constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112}; constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113}; constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114}; @@ -33,9 +33,11 @@ constexpr ResultCode 
ResultOutOfRange{ErrorModule::Kernel, 119}; constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120}; constexpr ResultCode ResultNotFound{ErrorModule::Kernel, 121}; constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122}; -constexpr ResultCode ResultSessionClosedByRemote{ErrorModule::Kernel, 123}; +constexpr ResultCode ResultSessionClosed{ErrorModule::Kernel, 123}; constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125}; -constexpr ResultCode ResultReservedValue{ErrorModule::Kernel, 126}; -constexpr ResultCode ResultResourceLimitedExceeded{ErrorModule::Kernel, 132}; +constexpr ResultCode ResultReservedUsed{ErrorModule::Kernel, 126}; +constexpr ResultCode ResultPortClosed{ErrorModule::Kernel, 131}; +constexpr ResultCode ResultLimitReached{ErrorModule::Kernel, 132}; +constexpr ResultCode ResultInvalidId{ErrorModule::Kernel, 519}; } // namespace Kernel diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 96afd544b..913b16494 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -154,15 +154,28 @@ void SvcWrap64(Core::System& system) { FuncReturn(system, retval); } +// Used by GetResourceLimitLimitValue. +template <ResultCode func(Core::System&, u64*, Handle, LimitableResource)> +void SvcWrap64(Core::System& system) { + u64 param_1 = 0; + const u32 retval = func(system, ¶m_1, static_cast<Handle>(Param(system, 1)), + static_cast<LimitableResource>(Param(system, 2))) + .raw; + + system.CurrentArmInterface().SetReg(1, param_1); + FuncReturn(system, retval); +} + template <ResultCode func(Core::System&, u32, u64)> void SvcWrap64(Core::System& system) { FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1)).raw); } -template <ResultCode func(Core::System&, u32, u32, u64)> +// Used by SetResourceLimitLimitValue +template <ResultCode func(Core::System&, Handle, LimitableResource, u64)> void SvcWrap64(Core::System& system) { - FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), - static_cast<u32>(Param(system, 1)), Param(system, 2)) + FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), + static_cast<LimitableResource>(Param(system, 1)), Param(system, 2)) .raw); } @@ -219,10 +232,11 @@ void SvcWrap64(Core::System& system) { func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw); } -template <ResultCode func(Core::System&, u32, u64, u64, u32)> +// Used by MapSharedMemory +template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)> void SvcWrap64(Core::System& system) { - FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), - Param(system, 2), static_cast<u32>(Param(system, 3))) + FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), Param(system, 1), + Param(system, 2), static_cast<Svc::MemoryPermission>(Param(system, 3))) .raw); } @@ -252,11 +266,13 @@ void SvcWrap64(Core::System& system) { .raw); } -template <ResultCode func(Core::System&, u64*, u64, u64, u64)> +// Used by GetInfo +template <ResultCode func(Core::System&, u64*, u64, Handle, u64)> void SvcWrap64(Core::System& system) { u64 param_1 = 0; - const u32 retval = - func(system, ¶m_1, Param(system, 1), Param(system, 2), Param(system, 3)).raw; + const u32 retval = func(system, ¶m_1, Param(system, 1), + static_cast<Handle>(Param(system, 2)), Param(system, 3)) + .raw; system.CurrentArmInterface().SetReg(1, param_1); FuncReturn(system, retval); @@ -273,11 +289,12 @@ void 
SvcWrap64(Core::System& system) { FuncReturn(system, retval); } -template <ResultCode func(Core::System&, u32*, u64, u64, u32)> +// Used by CreateTransferMemory +template <ResultCode func(Core::System&, Handle*, u64, u64, Svc::MemoryPermission)> void SvcWrap64(Core::System& system) { u32 param_1 = 0; const u32 retval = func(system, ¶m_1, Param(system, 1), Param(system, 2), - static_cast<u32>(Param(system, 3))) + static_cast<Svc::MemoryPermission>(Param(system, 3))) .raw; system.CurrentArmInterface().SetReg(1, param_1); @@ -537,6 +554,16 @@ void SvcWrap32(Core::System& system) { FuncReturn(system, retval); } +// Used by MapSharedMemory32 +template <ResultCode func(Core::System&, Handle, u32, u32, Svc::MemoryPermission)> +void SvcWrap32(Core::System& system) { + const u32 retval = func(system, static_cast<Handle>(Param(system, 0)), + static_cast<u32>(Param(system, 1)), static_cast<u32>(Param(system, 2)), + static_cast<Svc::MemoryPermission>(Param(system, 3))) + .raw; + FuncReturn(system, retval); +} + // Used by SetThreadCoreMask32 template <ResultCode func(Core::System&, Handle, s32, u32, u32)> void SvcWrap32(Core::System& system) { @@ -586,11 +613,12 @@ void SvcWrap32(Core::System& system) { } // Used by CreateTransferMemory32 -template <ResultCode func(Core::System&, Handle*, u32, u32, u32)> +template <ResultCode func(Core::System&, Handle*, u32, u32, Svc::MemoryPermission)> void SvcWrap32(Core::System& system) { Handle handle = 0; - const u32 retval = - func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw; + const u32 retval = func(system, &handle, Param32(system, 1), Param32(system, 2), + static_cast<Svc::MemoryPermission>(Param32(system, 3))) + .raw; system.CurrentArmInterface().SetReg(1, handle); FuncReturn(system, retval); } diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index fd0630019..ae9b4be2f 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -6,7 +6,6 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/core_timing_util.h" -#include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" @@ -15,16 +14,12 @@ namespace Kernel { TimeManager::TimeManager(Core::System& system_) : system{system_} { - time_manager_event_type = Core::Timing::CreateEvent( - "Kernel::TimeManagerCallback", - [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { - std::shared_ptr<KThread> thread; - { - std::lock_guard lock{mutex}; - thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle)); - } - thread->Wakeup(); - }); + time_manager_event_type = + Core::Timing::CreateEvent("Kernel::TimeManagerCallback", + [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { + KThread* thread = reinterpret_cast<KThread*>(thread_handle); + thread->Wakeup(); + }); } void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) { diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h index 0d7f05f30..2d175a9c4 100644 --- a/src/core/hle/kernel/time_manager.h +++ b/src/core/hle/kernel/time_manager.h @@ -8,8 +8,6 @@ #include <mutex> #include <unordered_map> -#include "core/hle/kernel/object.h" - namespace Core { class System; } // namespace Core |
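Editor's note (not part of the commit): the recurring pattern in the SVC hunks above is that manual HandleTable::Get lookups with hand-written error logging and cleanup are replaced by KScopedAutoObject lookups guarded by the R_UNLESS/R_TRY result macros, and resource accounting moves to KScopedResourceReservation with an explicit Commit(). The standalone sketch below only illustrates the general scoped-reference idea under simplified assumptions; ToyEvent and ScopedRef are invented stand-ins, not yuzu types, and the real KScopedAutoObject/KHandleTable semantics are more involved.

// Illustrative sketch of a scoped reference, loosely analogous to KScopedAutoObject.
// Toy types only; not the yuzu API.
#include <cstdio>

// Stand-in for a reference-counted kernel object owned by a handle table.
struct ToyEvent {
    int refs = 1;
    void Open() { ++refs; }
    void Close() {
        if (--refs == 0) {
            std::puts("event destroyed");
        }
    }
    void Signal() { std::puts("event signaled"); }
};

// Opens a reference when constructed and closes it on scope exit, so every
// early-return path drops the reference automatically.
class ScopedRef {
public:
    explicit ScopedRef(ToyEvent* obj) : obj_{obj} {
        if (obj_) {
            obj_->Open();
        }
    }
    ~ScopedRef() {
        if (obj_) {
            obj_->Close();
        }
    }
    bool IsNotNull() const { return obj_ != nullptr; }
    ToyEvent* operator->() const { return obj_; }

private:
    ToyEvent* obj_;
};

int main() {
    ToyEvent event; // stands in for an object the handle table owns
    {
        ScopedRef writable{&event}; // lookup succeeded; reference held for this call
        if (!writable.IsNotNull()) {
            return 1; // the SVCs above express this as R_UNLESS(..., ResultInvalidHandle)
        }
        writable->Signal();
    } // reference dropped here, no manual cleanup needed
    event.Close(); // drop the original owning reference
    return 0;
}

The payoff of this style, visible throughout the diff, is that validation can be written as straight-line R_UNLESS/R_TRY checks: the scoped object (or a SCOPE_EXIT/SCOPE_GUARD) handles release on every exit path, so the error branches no longer need their own logging-and-cleanup blocks.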