Diffstat (limited to 'src/core'): 37 files changed, 771 insertions, 268 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index bbbe60896..9e23afe85 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -70,6 +70,8 @@ add_library(core STATIC file_sys/system_archive/ng_word.h file_sys/system_archive/system_archive.cpp file_sys/system_archive/system_archive.h + file_sys/system_archive/system_version.cpp + file_sys/system_archive/system_version.h file_sys/vfs.cpp file_sys/vfs.h file_sys/vfs_concat.cpp @@ -144,6 +146,8 @@ add_library(core STATIC hle/kernel/svc_wrap.h hle/kernel/thread.cpp hle/kernel/thread.h + hle/kernel/transfer_memory.cpp + hle/kernel/transfer_memory.h hle/kernel/vm_manager.cpp hle/kernel/vm_manager.h hle/kernel/wait_object.cpp diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 1eefed6d0..e75741db0 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -22,7 +22,7 @@ namespace Core { void CpuBarrier::NotifyEnd() { - std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; end = true; condition.notify_all(); } @@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() { } if (!end) { - std::unique_lock<std::mutex> lock(mutex); + std::unique_lock lock{mutex}; --cores_waiting; if (!cores_waiting) { @@ -131,7 +131,7 @@ void Cpu::Reschedule() { reschedule_pending = false; // Lock the global kernel mutex when we manipulate the HLE state - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; scheduler->Reschedule(); } diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index a0dd5db24..41adb2302 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp @@ -186,7 +186,7 @@ void CoreTiming::Advance() { Event evt = std::move(event_queue.front()); std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>()); event_queue.pop_back(); - evt.type->callback(evt.userdata, static_cast<int>(global_timer - evt.time)); + evt.type->callback(evt.userdata, global_timer - evt.time); } is_global_timer_sane = false; diff --git a/src/core/core_timing.h b/src/core/core_timing.h index 59163bae1..9d2efde37 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h @@ -15,7 +15,7 @@ namespace Core::Timing { /// A callback that may be scheduled for a particular core timing event. -using TimedCallback = std::function<void(u64 userdata, int cycles_late)>; +using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>; /// Contains the characteristics of a particular event. 
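The recurring lock changes in core_cpu.cpp (and in several files further down) simply drop the explicit template argument in favour of C++17 class template argument deduction, and the core-timing callback now receives its lateness as a signed 64-bit value rather than a narrowed int. A minimal standalone sketch of both (the names here are illustrative, not taken from the diff):

    #include <cstdint>
    #include <functional>
    #include <mutex>

    std::mutex m;

    void Signal(bool& done) {
        // C++17 CTAD deduces std::lock_guard<std::mutex> from the argument,
        // so the explicit template parameter can be omitted.
        std::lock_guard lock{m};
        done = true;
    }

    // The lateness is now s64 (int64_t), matching what CoreTiming::Advance()
    // computes as global_timer - evt.time without a narrowing cast.
    using TimedCallback = std::function<void(std::uint64_t userdata, std::int64_t cycles_late)>;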
struct EventType { diff --git a/src/core/file_sys/cheat_engine.cpp b/src/core/file_sys/cheat_engine.cpp index 247fbc864..b06c2f20a 100644 --- a/src/core/file_sys/cheat_engine.cpp +++ b/src/core/file_sys/cheat_engine.cpp @@ -423,6 +423,7 @@ std::array<u8, 16> TextCheatParser::ParseSingleLineCheat(const std::string& line return out; } +namespace { u64 MemoryReadImpl(u32 width, VAddr addr) { switch (width) { case 1: @@ -457,6 +458,7 @@ void MemoryWriteImpl(u32 width, VAddr addr, u64 value) { UNREACHABLE(); } } +} // Anonymous namespace CheatEngine::CheatEngine(Core::System& system, std::vector<CheatList> cheats_, const std::string& build_id, VAddr code_region_start, diff --git a/src/core/file_sys/errors.h b/src/core/file_sys/errors.h index e4a4ee4ab..bb4654366 100644 --- a/src/core/file_sys/errors.h +++ b/src/core/file_sys/errors.h @@ -11,6 +11,9 @@ namespace FileSys { constexpr ResultCode ERROR_PATH_NOT_FOUND{ErrorModule::FS, 1}; constexpr ResultCode ERROR_ENTITY_NOT_FOUND{ErrorModule::FS, 1002}; constexpr ResultCode ERROR_SD_CARD_NOT_FOUND{ErrorModule::FS, 2001}; +constexpr ResultCode ERROR_OUT_OF_BOUNDS{ErrorModule::FS, 3005}; +constexpr ResultCode ERROR_FAILED_MOUNT_ARCHIVE{ErrorModule::FS, 3223}; +constexpr ResultCode ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001}; constexpr ResultCode ERROR_INVALID_OFFSET{ErrorModule::FS, 6061}; constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::FS, 6062}; diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index 58884b4a0..e11217708 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -20,6 +20,7 @@ #include "core/file_sys/vfs_vector.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/loader/loader.h" +#include "core/loader/nso.h" #include "core/settings.h" namespace FileSys { @@ -32,14 +33,6 @@ constexpr std::array<const char*, 14> EXEFS_FILE_NAMES{ "subsdk3", "subsdk4", "subsdk5", "subsdk6", "subsdk7", "subsdk8", "subsdk9", }; -struct NSOBuildHeader { - u32_le magic; - INSERT_PADDING_BYTES(0x3C); - std::array<u8, 0x20> build_id; - INSERT_PADDING_BYTES(0xA0); -}; -static_assert(sizeof(NSOBuildHeader) == 0x100, "NSOBuildHeader has incorrect size."); - std::string FormatTitleVersion(u32 version, TitleVersionFormat format) { std::array<u8, sizeof(u32)> bytes{}; bytes[0] = version % SINGLE_BYTE_MODULUS; @@ -163,14 +156,16 @@ std::vector<VirtualFile> PatchManager::CollectPatches(const std::vector<VirtualD } std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { - if (nso.size() < 0x100) + if (nso.size() < sizeof(Loader::NSOHeader)) { return nso; + } - NSOBuildHeader header; - std::memcpy(&header, nso.data(), sizeof(NSOBuildHeader)); + Loader::NSOHeader header; + std::memcpy(&header, nso.data(), sizeof(header)); - if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) + if (header.magic != Common::MakeMagic('N', 'S', 'O', '0')) { return nso; + } const auto build_id_raw = Common::HexArrayToString(header.build_id); const auto build_id = build_id_raw.substr(0, build_id_raw.find_last_not_of('0') + 1); @@ -213,9 +208,11 @@ std::vector<u8> PatchManager::PatchNSO(const std::vector<u8>& nso) const { } } - if (out.size() < 0x100) + if (out.size() < sizeof(Loader::NSOHeader)) { return nso; - std::memcpy(out.data(), &header, sizeof(NSOBuildHeader)); + } + + std::memcpy(out.data(), &header, sizeof(header)); return out; } diff --git a/src/core/file_sys/system_archive/system_archive.cpp b/src/core/file_sys/system_archive/system_archive.cpp index 
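PatchNSO above now reads the real Loader::NSOHeader and derives the patch lookup key by hex-encoding the build ID and trimming trailing zeroes. A self-contained sketch of that derivation (HexArrayToString re-implemented here purely for illustration):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Hex-encode a byte array (stand-in for Common::HexArrayToString).
    template <std::size_t N>
    std::string HexArrayToString(const std::array<std::uint8_t, N>& data) {
        static constexpr char digits[] = "0123456789ABCDEF";
        std::string out;
        out.reserve(N * 2);
        for (const auto byte : data) {
            out.push_back(digits[byte >> 4]);
            out.push_back(digits[byte & 0xF]);
        }
        return out;
    }

    int main() {
        std::array<std::uint8_t, 0x20> build_id{};
        build_id[0] = 0xAB;
        build_id[1] = 0xCD;  // remaining bytes stay zero

        const auto raw = HexArrayToString(build_id);
        // Trailing '0' characters are stripped to form the shortened build ID.
        const auto build_id_trimmed = raw.substr(0, raw.find_last_not_of('0') + 1);
        std::printf("%s\n", build_id_trimmed.c_str());  // prints "ABCD"
    }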
e3e79f40a..c9722ed77 100644 --- a/src/core/file_sys/system_archive/system_archive.cpp +++ b/src/core/file_sys/system_archive/system_archive.cpp @@ -6,6 +6,7 @@ #include "core/file_sys/romfs.h" #include "core/file_sys/system_archive/ng_word.h" #include "core/file_sys/system_archive/system_archive.h" +#include "core/file_sys/system_archive/system_version.h" namespace FileSys::SystemArchive { @@ -30,7 +31,7 @@ constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHI {0x0100000000000806, "NgWord", &NgWord1}, {0x0100000000000807, "SsidList", nullptr}, {0x0100000000000808, "Dictionary", nullptr}, - {0x0100000000000809, "SystemVersion", nullptr}, + {0x0100000000000809, "SystemVersion", &SystemVersion}, {0x010000000000080A, "AvatarImage", nullptr}, {0x010000000000080B, "LocalNews", nullptr}, {0x010000000000080C, "Eula", nullptr}, diff --git a/src/core/file_sys/system_archive/system_version.cpp b/src/core/file_sys/system_archive/system_version.cpp new file mode 100644 index 000000000..6e22f97b0 --- /dev/null +++ b/src/core/file_sys/system_archive/system_version.cpp @@ -0,0 +1,52 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/file_sys/system_archive/system_version.h" +#include "core/file_sys/vfs_vector.h" + +namespace FileSys::SystemArchive { + +namespace SystemVersionData { + +// This section should reflect the best system version to describe yuzu's HLE api. +// TODO(DarkLordZach): Update when HLE gets better. + +constexpr u8 VERSION_MAJOR = 5; +constexpr u8 VERSION_MINOR = 1; +constexpr u8 VERSION_MICRO = 0; + +constexpr u8 REVISION_MAJOR = 3; +constexpr u8 REVISION_MINOR = 0; + +constexpr char PLATFORM_STRING[] = "NX"; +constexpr char VERSION_HASH[] = "23f9df53e25709d756e0c76effcb2473bd3447dd"; +constexpr char DISPLAY_VERSION[] = "5.1.0"; +constexpr char DISPLAY_TITLE[] = "NintendoSDK Firmware for NX 5.1.0-3.0"; + +} // namespace SystemVersionData + +std::string GetLongDisplayVersion() { + return SystemVersionData::DISPLAY_TITLE; +} + +VirtualDir SystemVersion() { + VirtualFile file = std::make_shared<VectorVfsFile>(std::vector<u8>(0x100), "file"); + file->WriteObject(SystemVersionData::VERSION_MAJOR, 0); + file->WriteObject(SystemVersionData::VERSION_MINOR, 1); + file->WriteObject(SystemVersionData::VERSION_MICRO, 2); + file->WriteObject(SystemVersionData::REVISION_MAJOR, 4); + file->WriteObject(SystemVersionData::REVISION_MINOR, 5); + file->WriteArray(SystemVersionData::PLATFORM_STRING, + std::min<u64>(sizeof(SystemVersionData::PLATFORM_STRING), 0x20ULL), 0x8); + file->WriteArray(SystemVersionData::VERSION_HASH, + std::min<u64>(sizeof(SystemVersionData::VERSION_HASH), 0x40ULL), 0x28); + file->WriteArray(SystemVersionData::DISPLAY_VERSION, + std::min<u64>(sizeof(SystemVersionData::DISPLAY_VERSION), 0x18ULL), 0x68); + file->WriteArray(SystemVersionData::DISPLAY_TITLE, + std::min<u64>(sizeof(SystemVersionData::DISPLAY_TITLE), 0x80ULL), 0x80); + return std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{file}, + std::vector<VirtualDir>{}, "data"); +} + +} // namespace FileSys::SystemArchive diff --git a/src/core/file_sys/system_archive/system_version.h b/src/core/file_sys/system_archive/system_version.h new file mode 100644 index 000000000..deed79b26 --- /dev/null +++ b/src/core/file_sys/system_archive/system_version.h @@ -0,0 +1,16 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
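The new SystemVersion() archive above packs the version constants into a single 0x100-byte file at fixed offsets. The same layout written into a plain std::vector, without the VFS classes, as a sketch of the offsets used by WriteObject/WriteArray above:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    std::vector<std::uint8_t> BuildSystemVersionFile() {
        std::vector<std::uint8_t> file(0x100, 0);

        const auto write_string = [&file](const char* str, std::size_t max, std::size_t offset) {
            // Strings are clamped to their field size, mirroring the std::min
            // used with WriteArray in the diff.
            const std::size_t len = std::min(std::strlen(str) + 1, max);
            std::memcpy(file.data() + offset, str, len);
        };

        file[0x00] = 5;  // VERSION_MAJOR
        file[0x01] = 1;  // VERSION_MINOR
        file[0x02] = 0;  // VERSION_MICRO
        file[0x04] = 3;  // REVISION_MAJOR
        file[0x05] = 0;  // REVISION_MINOR
        write_string("NX", 0x20, 0x08);                                        // platform string
        write_string("23f9df53e25709d756e0c76effcb2473bd3447dd", 0x40, 0x28);  // version hash
        write_string("5.1.0", 0x18, 0x68);                                     // display version
        write_string("NintendoSDK Firmware for NX 5.1.0-3.0", 0x80, 0x80);     // display title
        return file;
    }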
+ +#pragma once + +#include <string> +#include "core/file_sys/vfs_types.h" + +namespace FileSys::SystemArchive { + +std::string GetLongDisplayVersion(); + +VirtualDir SystemVersion(); + +} // namespace FileSys::SystemArchive diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp index e29afd630..1320bbe77 100644 --- a/src/core/frontend/emu_window.cpp +++ b/src/core/frontend/emu_window.cpp @@ -30,7 +30,7 @@ private: explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} std::tuple<float, float, bool> GetStatus() const override { if (auto state = touch_state.lock()) { - std::lock_guard<std::mutex> guard(state->mutex); + std::lock_guard guard{state->mutex}; return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); } return std::make_tuple(0.0f, 0.0f, false); @@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) { if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) return; - std::lock_guard<std::mutex> guard(touch_state->mutex); + std::lock_guard guard{touch_state->mutex}; touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / (framebuffer_layout.screen.right - framebuffer_layout.screen.left); touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / @@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) { } void EmuWindow::TouchReleased() { - std::lock_guard<std::mutex> guard(touch_state->mutex); + std::lock_guard guard{touch_state->mutex}; touch_state->touch_pressed = false; touch_state->touch_x = 0; touch_state->touch_y = 0; diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 352190da8..c8842410b 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ // them all. std::size_t last = waiting_threads.size(); if (num_to_wake > 0) { - last = num_to_wake; + last = std::min(last, static_cast<std::size_t>(num_to_wake)); } // Signal the waiting threads. @@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a // Determine the modified value depending on the waiting count. s32 updated_value; if (waiting_threads.empty()) { - updated_value = value - 1; - } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { updated_value = value + 1; + } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { + updated_value = value - 1; } else { updated_value = value; } diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 4d224d01d..6baeb3494 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -29,12 +29,12 @@ namespace Kernel { * @param thread_handle The handle of the thread that's been awoken * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time */ -static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_late) { +static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) { const auto proper_handle = static_cast<Handle>(thread_handle); const auto& system = Core::System::GetInstance(); // Lock the global kernel mutex when we enter the kernel HLE. 
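The WakeThreads change above clamps the wake count so a num_to_wake larger than the number of waiters can no longer index past the end of the vector (the following hunk likewise swaps the +1/-1 adjustments so the value is incremented when nobody is waiting). The clamp in isolation:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns how many waiters should actually be woken.
    // num_to_wake <= 0 conventionally means "wake everyone".
    std::size_t WakeCount(const std::vector<int>& waiting_threads, std::int32_t num_to_wake) {
        std::size_t last = waiting_threads.size();
        if (num_to_wake > 0) {
            // Previously this was `last = num_to_wake;`, which could exceed
            // waiting_threads.size() and walk off the end of the vector.
            last = std::min(last, static_cast<std::size_t>(num_to_wake));
        }
        return last;
    }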
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; SharedPtr<Thread> thread = system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); @@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_ if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || thread->GetWaitHandle() != 0) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex || + thread->GetStatus() == ThreadStatus::WaitCondVar); thread->SetMutexWaitAddress(0); thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index ff17ff865..03ea5b659 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -8,9 +8,6 @@ #include <unordered_map> #include "core/hle/kernel/object.h" -template <typename T> -class ResultVal; - namespace Core { class System; } diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp index 8870463d0..217144efc 100644 --- a/src/core/hle/kernel/object.cpp +++ b/src/core/hle/kernel/object.cpp @@ -23,6 +23,7 @@ bool Object::IsWaitable() const { case HandleType::Unknown: case HandleType::WritableEvent: case HandleType::SharedMemory: + case HandleType::TransferMemory: case HandleType::AddressArbiter: case HandleType::ResourceLimit: case HandleType::ClientPort: diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index 4c2505908..3f6baa094 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h @@ -22,6 +22,7 @@ enum class HandleType : u32 { WritableEvent, ReadableEvent, SharedMemory, + TransferMemory, Thread, Process, AddressArbiter, diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index 0d782e4ba..b0b7af76b 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -5,6 +5,7 @@ #include <algorithm> #include <memory> #include <random> +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" @@ -75,6 +76,10 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const { return resource_limit; } +u64 Process::GetTotalPhysicalMemoryUsed() const { + return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size; +} + ResultCode Process::ClearSignalState() { if (status == ProcessStatus::Exited) { LOG_ERROR(Kernel, "called on a terminated process instance."); @@ -107,14 +112,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { return handle_table.SetSize(capabilities.GetHandleTableSize()); } -void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { +void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) { + // The kernel always ensures that the given stack size is page aligned. + main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); + // Allocate and map the main thread stack // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part // of the user address space. 
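GetTotalPhysicalMemoryUsed above is straightforward bookkeeping: the current heap size plus the main-thread stack and loaded code sizes, both of which this change starts tracking on the process. In isolation:

    #include <cstdint>

    struct ProcessMemoryAccounting {
        std::uint64_t current_heap_size = 0;       // from the VM manager
        std::uint64_t main_thread_stack_size = 0;  // set in Process::Run
        std::uint64_t code_memory_size = 0;        // accumulated in Process::LoadModule

        std::uint64_t TotalPhysicalMemoryUsed() const {
            return current_heap_size + main_thread_stack_size + code_memory_size;
        }
    };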
+ const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; vm_manager - .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, - std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, - MemoryState::Stack) + .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size), + 0, main_thread_stack_size, MemoryState::Stack) .Unwrap(); vm_manager.LogLayout(); @@ -224,6 +232,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) { MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); + code_memory_size += module_.memory->size(); + // Clear instruction cache in CPU JIT system.InvalidateCpuInstructionCaches(); } diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 1bd7bf5c1..732d12170 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -35,14 +35,6 @@ class Thread; struct CodeSet; -struct AddressMapping { - // Address and size must be page-aligned - VAddr address; - u64 size; - bool read_only; - bool unk_flag; -}; - enum class MemoryRegion : u16 { APPLICATION = 1, SYSTEM = 2, @@ -194,6 +186,9 @@ public: return random_entropy.at(index); } + /// Retrieves the total physical memory used by this process in bytes. + u64 GetTotalPhysicalMemoryUsed() const; + /// Clears the signaled state of the process if and only if it's signaled. /// /// @pre The process must not be already terminated. If this is called on a @@ -218,7 +213,7 @@ public: /** * Applies address space changes and launches the process main thread. */ - void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size); + void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size); /** * Prepares a process for termination by stopping all of its threads @@ -255,6 +250,12 @@ private: /// Memory manager for this process. Kernel::VMManager vm_manager; + /// Size of the main thread's stack in bytes. + u64 main_thread_stack_size = 0; + + /// Size of the loaded code memory in bytes. + u64 code_memory_size = 0; + /// Current status of the process ProcessStatus status; diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index cc189cc64..ac501bf7f 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -29,8 +29,8 @@ Scheduler::~Scheduler() { } bool Scheduler::HaveReadyThreads() const { - std::lock_guard<std::mutex> lock(scheduler_mutex); - return ready_queue.get_first() != nullptr; + std::lock_guard lock{scheduler_mutex}; + return !ready_queue.empty(); } Thread* Scheduler::GetCurrentThread() const { @@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() { Thread* thread = GetCurrentThread(); if (thread && thread->GetStatus() == ThreadStatus::Running) { + if (ready_queue.empty()) { + return thread; + } // We have to do better than the current thread. // This call returns null when that's not possible. 
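Process::Run now rounds the requested stack size up to the page size and places the stack so that it ends exactly at the TLS/IO region end. A standalone sketch of that arithmetic (AlignUp re-implemented; the addresses are illustrative):

    #include <cstdint>

    constexpr std::uint64_t PAGE_SIZE = 0x1000;

    // Round `value` up to the next multiple of `align` (a power of two),
    // mirroring what Common::AlignUp does for the stack size.
    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    struct StackPlacement {
        std::uint64_t size;
        std::uint64_t base;
    };

    constexpr StackPlacement PlaceMainThreadStack(std::uint64_t tls_io_region_end,
                                                  std::uint64_t requested_stack_size) {
        const std::uint64_t size = AlignUp(requested_stack_size, PAGE_SIZE);
        // The stack is mapped so that it ends at the TLS/IO region end.
        return {size, tls_io_region_end - size};
    }

    static_assert(PlaceMainThreadStack(0x1'0000'0000, 0x1001).size == 0x2000);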
- next = ready_queue.pop_first_better(thread->GetPriority()); - if (!next) { - // Otherwise just keep going with the current thread + next = ready_queue.front(); + if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { next = thread; } } else { - next = ready_queue.pop_first(); + if (ready_queue.empty()) { + return nullptr; + } + next = ready_queue.front(); } return next; } void Scheduler::SwitchContext(Thread* new_thread) { - Thread* const previous_thread = GetCurrentThread(); + Thread* previous_thread = GetCurrentThread(); Process* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { if (previous_thread->GetStatus() == ThreadStatus::Running) { // This is only the case when a reschedule is triggered without the current thread // yielding execution (i.e. an event triggered, system core time-sliced, etc) - ready_queue.push_front(previous_thread->GetPriority(), previous_thread); + ready_queue.add(previous_thread, previous_thread->GetPriority(), false); previous_thread->SetStatus(ThreadStatus::Ready); } } @@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { current_thread = new_thread; - ready_queue.remove(new_thread->GetPriority(), new_thread); + ready_queue.remove(new_thread, new_thread->GetPriority()); new_thread->SetStatus(ThreadStatus::Running); auto* const thread_owner_process = current_thread->GetOwnerProcess(); @@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { } void Scheduler::Reschedule() { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; Thread* cur = GetCurrentThread(); Thread* next = PopNextReadyThread(); @@ -143,51 +148,54 @@ void Scheduler::Reschedule() { SwitchContext(next); } -void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); +void Scheduler::AddThread(SharedPtr<Thread> thread) { + std::lock_guard lock{scheduler_mutex}; thread_list.push_back(std::move(thread)); - ready_queue.prepare(priority); } void Scheduler::RemoveThread(Thread* thread) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), thread_list.end()); } void Scheduler::ScheduleThread(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.push_back(priority, thread); + ready_queue.add(thread, priority); } void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.remove(priority, thread); + ready_queue.remove(thread, priority); } void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; + if (thread->GetPriority() == priority) { + return; + } // If thread was ready, adjust queues if (thread->GetStatus() == ThreadStatus::Ready) - ready_queue.move(thread, thread->GetPriority(), priority); - else - ready_queue.prepare(priority); + ready_queue.adjust(thread, thread->GetPriority(), priority); } Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const 
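The scheduler switches from ThreadQueueList to Common::MultiLevelQueue, whose add/remove/adjust/front/empty calls appear above. The real container lives in common/, but conceptually it is a per-priority list with the lowest index treated as the highest priority; a simplified stand-in, not the actual implementation:

    #include <cstddef>
    #include <list>
    #include <vector>

    template <typename T, std::size_t Depth>
    class SimpleMultiLevelQueue {
    public:
        void add(T item, std::size_t priority, bool push_back = true) {
            auto& level = levels[priority];
            if (push_back) {
                level.push_back(item);
            } else {
                level.push_front(item);  // a preempted thread goes back to the front
            }
        }

        void remove(T item, std::size_t priority) {
            levels[priority].remove(item);
        }

        void adjust(T item, std::size_t old_priority, std::size_t new_priority) {
            remove(item, old_priority);
            add(item, new_priority);
        }

        bool empty() const {
            for (const auto& level : levels) {
                if (!level.empty()) {
                    return false;
                }
            }
            return true;
        }

        // Highest-priority (lowest index) ready item, or a default-constructed T.
        T front() const {
            for (const auto& level : levels) {
                if (!level.empty()) {
                    return level.front();
                }
            }
            return T{};
        }

    private:
        std::vector<std::list<T>> levels = std::vector<std::list<T>>(Depth);
    };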
{ - std::lock_guard<std::mutex> lock(scheduler_mutex); + std::lock_guard lock{scheduler_mutex}; const u32 mask = 1U << core; - return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { - return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; - }); + for (auto* thread : ready_queue) { + if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { + return thread; + } + } + return nullptr; } void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 1c5bf57d9..b29bf7be8 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -7,7 +7,7 @@ #include <mutex> #include <vector> #include "common/common_types.h" -#include "common/thread_queue_list.h" +#include "common/multi_level_queue.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/thread.h" @@ -38,7 +38,7 @@ public: u64 GetLastContextSwitchTicks() const; /// Adds a new thread to the scheduler - void AddThread(SharedPtr<Thread> thread, u32 priority); + void AddThread(SharedPtr<Thread> thread); /// Removes a thread from the scheduler void RemoveThread(Thread* thread); @@ -156,7 +156,7 @@ private: std::vector<SharedPtr<Thread>> thread_list; /// Lists only ready thread ids. - Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; + Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; SharedPtr<Thread> current_thread = nullptr; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index a6a17efe7..76a8b0191 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -32,6 +32,7 @@ #include "core/hle/kernel/svc.h" #include "core/hle/kernel/svc_wrap.h" #include "core/hle/kernel/thread.h" +#include "core/hle/kernel/transfer_memory.h" #include "core/hle/kernel/writable_event.h" #include "core/hle/lock.h" #include "core/hle/result.h" @@ -174,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) { return ERR_INVALID_SIZE; } - auto& vm_manager = Core::CurrentProcess()->VMManager(); - const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); - const auto alloc_result = - vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite); - + auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager(); + const auto alloc_result = vm_manager.SetHeapSize(heap_size); if (alloc_result.Failed()) { return alloc_result.Code(); } @@ -711,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) HeapRegionBaseAddr = 4, HeapRegionSize = 5, TotalMemoryUsage = 6, - TotalHeapUsage = 7, + TotalPhysicalMemoryUsed = 7, IsCurrentProcessBeingDebugged = 8, RegisterResourceLimit = 9, IdleTickCount = 10, @@ -747,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) case GetInfoType::NewMapRegionBaseAddr: case GetInfoType::NewMapRegionSize: case GetInfoType::TotalMemoryUsage: - case GetInfoType::TotalHeapUsage: + case GetInfoType::TotalPhysicalMemoryUsed: case GetInfoType::IsVirtualAddressMemoryEnabled: case GetInfoType::PersonalMmHeapUsage: case GetInfoType::TitleId: @@ -807,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id) *result = process->VMManager().GetTotalMemoryUsage(); return RESULT_SUCCESS; - case GetInfoType::TotalHeapUsage: - *result = process->VMManager().GetTotalHeapUsage(); + case 
GetInfoType::TotalPhysicalMemoryUsed: + *result = process->GetTotalPhysicalMemoryUsed(); return RESULT_SUCCESS; case GetInfoType::IsVirtualAddressMemoryEnabled: @@ -1355,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var current_thread->SetCondVarWaitAddress(condition_variable_addr); current_thread->SetMutexWaitAddress(mutex_addr); current_thread->SetWaitHandle(thread_handle); - current_thread->SetStatus(ThreadStatus::WaitMutex); + current_thread->SetStatus(ThreadStatus::WaitCondVar); current_thread->InvalidateWakeupCallback(); current_thread->WakeAfterDelay(nano_seconds); @@ -1399,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target // them all. std::size_t last = waiting_threads.size(); if (target != -1) - last = target; + last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); // If there are no threads waiting on this condition variable, just exit - if (last > waiting_threads.size()) + if (last == 0) return RESULT_SUCCESS; for (std::size_t index = 0; index < last; ++index) { @@ -1410,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); + // liberate Cond Var Thread. + thread->SetCondVarWaitAddress(0); + std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); auto& monitor = Core::System::GetInstance().Monitor(); @@ -1428,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target } } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), thread->GetWaitHandle())); - if (mutex_val == 0) { // We were able to acquire the mutex, resume this thread. - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); thread->ResumeFromWait(); auto* const lock_owner = thread->GetLockOwner(); @@ -1441,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target thread->SetLockOwner(nullptr); thread->SetMutexWaitAddress(0); - thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); + Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); } else { // Atomically signal that the mutex now has a waiting thread. 
do { @@ -1461,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); auto owner = handle_table.Get<Thread>(owner_handle); ASSERT(owner); - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); + ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); thread->InvalidateWakeupCallback(); + thread->SetStatus(ThreadStatus::WaitMutex); owner->AddMutexWaiter(thread); - - Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); } } @@ -1586,14 +1585,121 @@ static ResultCode CreateTransferMemory(Handle* handle, VAddr addr, u64 size, u32 } auto& kernel = Core::System::GetInstance().Kernel(); - auto process = kernel.CurrentProcess(); - auto& handle_table = process->GetHandleTable(); - const auto shared_mem_handle = SharedMemory::Create(kernel, process, size, perms, perms, addr); + auto transfer_mem_handle = TransferMemory::Create(kernel, addr, size, perms); - CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); + auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); + const auto result = handle_table.Create(std::move(transfer_mem_handle)); + if (result.Failed()) { + return result.Code(); + } + + *handle = *result; return RESULT_SUCCESS; } +static ResultCode MapTransferMemory(Handle handle, VAddr address, u64 size, u32 permission_raw) { + LOG_DEBUG(Kernel_SVC, + "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}", + handle, address, size, permission_raw); + + if (!Common::Is4KBAligned(address)) { + LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", + address); + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, + "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", + size); + return ERR_INVALID_SIZE; + } + + if (!IsValidAddressRange(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size overflows the 64-bit range (address=0x{:016X}, " + "size=0x{:016X}).", + address, size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto permissions = static_cast<MemoryPermission>(permission_raw); + if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read && + permissions != MemoryPermission::ReadWrite) { + LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).", + permission_raw); + return ERR_INVALID_STATE; + } + + const auto& kernel = Core::System::GetInstance().Kernel(); + const auto* const current_process = kernel.CurrentProcess(); + const auto& handle_table = current_process->GetHandleTable(); + + auto transfer_memory = handle_table.Get<TransferMemory>(handle); + if (!transfer_memory) { + LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", + handle); + return ERR_INVALID_HANDLE; + } + + if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size don't fully fit within the ASLR region " + "(address=0x{:016X}, size=0x{:016X}).", + address, size); + return ERR_INVALID_MEMORY_RANGE; + } + + return transfer_memory->MapMemory(address, size, permissions); +} + +static ResultCode UnmapTransferMemory(Handle handle, VAddr address, u64 size) { + LOG_DEBUG(Kernel_SVC, "called. 
handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle, + address, size); + + if (!Common::Is4KBAligned(address)) { + LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", + address); + return ERR_INVALID_ADDRESS; + } + + if (size == 0 || !Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, + "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", + size); + return ERR_INVALID_SIZE; + } + + if (!IsValidAddressRange(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size overflows the 64-bit range (address=0x{:016X}, " + "size=0x{:016X}).", + address, size); + return ERR_INVALID_ADDRESS_STATE; + } + + const auto& kernel = Core::System::GetInstance().Kernel(); + const auto* const current_process = kernel.CurrentProcess(); + const auto& handle_table = current_process->GetHandleTable(); + + auto transfer_memory = handle_table.Get<TransferMemory>(handle); + if (!transfer_memory) { + LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", + handle); + return ERR_INVALID_HANDLE; + } + + if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { + LOG_ERROR(Kernel_SVC, + "Given address and size don't fully fit within the ASLR region " + "(address=0x{:016X}, size=0x{:016X}).", + address, size); + return ERR_INVALID_MEMORY_RANGE; + } + + return transfer_memory->UnmapMemory(address, size); +} + static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask) { LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); @@ -1969,8 +2075,8 @@ static const FunctionDef SVC_Table[] = { {0x4E, nullptr, "ReadWriteRegister"}, {0x4F, nullptr, "SetProcessActivity"}, {0x50, SvcWrap<CreateSharedMemory>, "CreateSharedMemory"}, - {0x51, nullptr, "MapTransferMemory"}, - {0x52, nullptr, "UnmapTransferMemory"}, + {0x51, SvcWrap<MapTransferMemory>, "MapTransferMemory"}, + {0x52, SvcWrap<UnmapTransferMemory>, "UnmapTransferMemory"}, {0x53, nullptr, "CreateInterruptEvent"}, {0x54, nullptr, "QueryPhysicalAddress"}, {0x55, nullptr, "QueryIoMapping"}, @@ -2032,7 +2138,7 @@ void CallSVC(u32 immediate) { MICROPROFILE_SCOPE(Kernel_SVC); // Lock the global kernel mutex when we enter the kernel HLE. 
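MapTransferMemory and UnmapTransferMemory perform the same alignment and overflow validation before consulting the handle table. Restated as a compile-time checkable sketch (Is4KBAligned/IsValidAddressRange re-declared here for illustration):

    #include <cstdint>

    constexpr bool Is4KBAligned(std::uint64_t value) {
        return (value & 0xFFF) == 0;
    }

    // The address range must not wrap around the 64-bit address space.
    constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size) {
        return address + size > address;
    }

    constexpr bool ValidateTransferMemoryRange(std::uint64_t address, std::uint64_t size) {
        return Is4KBAligned(address) &&           // base must be page aligned
               size != 0 && Is4KBAligned(size) && // size must be non-zero and page aligned
               IsValidAddressRange(address, size);
    }

    static_assert(ValidateTransferMemoryRange(0x10000, 0x2000));
    static_assert(!ValidateTransferMemoryRange(0x10001, 0x2000));  // unaligned base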
- std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; const FunctionDef* info = GetSVCInfo(immediate); if (info) { diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3b22e8e0d..fa3ac3abc 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -105,6 +105,7 @@ void Thread::ResumeFromWait() { case ThreadStatus::WaitSleep: case ThreadStatus::WaitIPC: case ThreadStatus::WaitMutex: + case ThreadStatus::WaitCondVar: case ThreadStatus::WaitArb: break; @@ -198,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); thread->owner_process = &owner_process; thread->scheduler = &system.Scheduler(processor_id); - thread->scheduler->AddThread(thread, priority); + thread->scheduler->AddThread(thread); thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used @@ -351,7 +352,7 @@ void Thread::ChangeScheduler() { if (*new_processor_id != processor_id) { // Remove thread from previous core's scheduler scheduler->RemoveThread(this); - next_scheduler.AddThread(this, current_priority); + next_scheduler.AddThread(this); } processor_id = *new_processor_id; diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index faad5f391..9c684758c 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -51,7 +51,8 @@ enum class ThreadStatus { WaitIPC, ///< Waiting for the reply from an IPC request WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true - WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc + WaitMutex, ///< Waiting due to an ArbitrateLock svc + WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc Dormant, ///< Created but not yet made ready Dead ///< Run to completion, or forcefully terminated diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp new file mode 100644 index 000000000..23228e1b5 --- /dev/null +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -0,0 +1,73 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/shared_memory.h" +#include "core/hle/kernel/transfer_memory.h" +#include "core/hle/result.h" + +namespace Kernel { + +TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {} +TransferMemory::~TransferMemory() = default; + +SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, + size_t size, MemoryPermission permissions) { + SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)}; + + transfer_memory->base_address = base_address; + transfer_memory->memory_size = size; + transfer_memory->owner_permissions = permissions; + transfer_memory->owner_process = kernel.CurrentProcess(); + + return transfer_memory; +} + +ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) { + if (memory_size != size) { + return ERR_INVALID_SIZE; + } + + if (owner_permissions != permissions) { + return ERR_INVALID_STATE; + } + + if (is_mapped) { + return ERR_INVALID_STATE; + } + + const auto map_state = owner_permissions == MemoryPermission::None + ? MemoryState::TransferMemoryIsolated + : MemoryState::TransferMemory; + auto& vm_manager = owner_process->VMManager(); + const auto map_result = vm_manager.MapMemoryBlock( + address, std::make_shared<std::vector<u8>>(size), 0, size, map_state); + + if (map_result.Failed()) { + return map_result.Code(); + } + + is_mapped = true; + return RESULT_SUCCESS; +} + +ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) { + if (memory_size != size) { + return ERR_INVALID_SIZE; + } + + auto& vm_manager = owner_process->VMManager(); + const auto result = vm_manager.UnmapRange(address, size); + + if (result.IsError()) { + return result; + } + + is_mapped = false; + return RESULT_SUCCESS; +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h new file mode 100644 index 000000000..ec294951e --- /dev/null +++ b/src/core/hle/kernel/transfer_memory.h @@ -0,0 +1,91 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/kernel/object.h" + +union ResultCode; + +namespace Kernel { + +class KernelCore; +class Process; + +enum class MemoryPermission : u32; + +/// Defines the interface for transfer memory objects. +/// +/// Transfer memory is typically used for the purpose of +/// transferring memory between separate process instances, +/// thus the name. +/// +class TransferMemory final : public Object { +public: + static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; + + static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size, + MemoryPermission permissions); + + TransferMemory(const TransferMemory&) = delete; + TransferMemory& operator=(const TransferMemory&) = delete; + + TransferMemory(TransferMemory&&) = delete; + TransferMemory& operator=(TransferMemory&&) = delete; + + std::string GetTypeName() const override { + return "TransferMemory"; + } + + std::string GetName() const override { + return GetTypeName(); + } + + HandleType GetHandleType() const override { + return HANDLE_TYPE; + } + + /// Attempts to map transfer memory with the given range and memory permissions. + /// + /// @param address The base address to being mapping memory at. + /// @param size The size of the memory to map, in bytes. 
+ /// @param permissions The memory permissions to check against when mapping memory. + /// + /// @pre The given address, size, and memory permissions must all match + /// the same values that were given when creating the transfer memory + /// instance. + /// + ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions); + + /// Unmaps the transfer memory with the given range + /// + /// @param address The base address to begin unmapping memory at. + /// @param size The size of the memory to unmap, in bytes. + /// + /// @pre The given address and size must be the same as the ones used + /// to create the transfer memory instance. + /// + ResultCode UnmapMemory(VAddr address, size_t size); + +private: + explicit TransferMemory(KernelCore& kernel); + ~TransferMemory() override; + + /// The base address for the memory managed by this instance. + VAddr base_address = 0; + + /// Size of the memory, in bytes, that this instance manages. + size_t memory_size = 0; + + /// The memory permissions that are applied to this instance. + MemoryPermission owner_permissions{}; + + /// The process that this transfer memory instance was created under. + Process* owner_process = nullptr; + + /// Whether or not this transfer memory instance has mapped memory. + bool is_mapped = false; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 22bf55ce7..ec0a480ce 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p return RESULT_SUCCESS; } -ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; +ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { + if (size > GetHeapRegionSize()) { + return ERR_OUT_OF_MEMORY; + } + + // No need to do any additional work if the heap is already the given size. + if (size == GetCurrentHeapSize()) { + return MakeResult(heap_region_base); } if (heap_memory == nullptr) { // Initialize heap - heap_memory = std::make_shared<std::vector<u8>>(); - heap_start = heap_end = target; + heap_memory = std::make_shared<std::vector<u8>>(size); + heap_end = heap_region_base + size; } else { - UnmapRange(heap_start, heap_end - heap_start); - } - - // If necessary, expand backing vector to cover new heap extents. - if (target < heap_start) { - heap_memory->insert(begin(*heap_memory), heap_start - target, 0); - heap_start = target; - RefreshMemoryBlockMappings(heap_memory.get()); - } - if (target + size > heap_end) { - heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0); - heap_end = target + size; - RefreshMemoryBlockMappings(heap_memory.get()); + UnmapRange(heap_region_base, GetCurrentHeapSize()); } - ASSERT(heap_end - heap_start == heap_memory->size()); - CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, - MemoryState::Heap)); - Reprotect(vma, perms); + // If necessary, expand backing vector to cover new heap extents in + // the case of allocating. Otherwise, shrink the backing memory, + // if a smaller heap has been requested. 
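Taken together, MapMemory/UnmapMemory above require the caller to pass exactly the size (and, for mapping, the permissions) used at creation time, and refuse to map the region twice. That state machine reduces to the following self-contained model (kernel error codes replaced with a plain enum):

    #include <cstdint>

    enum class MemoryPermission : std::uint32_t { None, Read, ReadWrite };
    enum class Result { Success, InvalidSize, InvalidState };

    class TransferMemoryModel {
    public:
        TransferMemoryModel(std::uint64_t size, MemoryPermission perms)
            : memory_size{size}, owner_permissions{perms} {}

        Result Map(std::uint64_t size, MemoryPermission perms) {
            if (size != memory_size) {
                return Result::InvalidSize;   // must map the whole region
            }
            if (perms != owner_permissions) {
                return Result::InvalidState;  // permissions must match creation
            }
            if (is_mapped) {
                return Result::InvalidState;  // no double mapping
            }
            is_mapped = true;
            return Result::Success;
        }

        Result Unmap(std::uint64_t size) {
            if (size != memory_size) {
                return Result::InvalidSize;
            }
            is_mapped = false;
            return Result::Success;
        }

    private:
        std::uint64_t memory_size;
        MemoryPermission owner_permissions;
        bool is_mapped = false;
    };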
+ const u64 old_heap_size = GetCurrentHeapSize(); + if (size > old_heap_size) { + const u64 alloc_size = size - old_heap_size; - heap_used = size; - - return MakeResult<VAddr>(heap_end - size); -} + heap_memory->insert(heap_memory->end(), alloc_size, 0); + RefreshMemoryBlockMappings(heap_memory.get()); + } else if (size < old_heap_size) { + heap_memory->resize(size); + heap_memory->shrink_to_fit(); -ResultCode VMManager::HeapFree(VAddr target, u64 size) { - if (!IsWithinHeapRegion(target, size)) { - return ERR_INVALID_ADDRESS; + RefreshMemoryBlockMappings(heap_memory.get()); } - if (size == 0) { - return RESULT_SUCCESS; - } + heap_end = heap_region_base + size; + ASSERT(GetCurrentHeapSize() == heap_memory->size()); - const ResultCode result = UnmapRange(target, size); - if (result.IsError()) { - return result; + const auto mapping_result = + MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap); + if (mapping_result.Failed()) { + return mapping_result.Code(); } - heap_used -= size; - return RESULT_SUCCESS; + return MakeResult<VAddr>(heap_region_base); } MemoryInfo VMManager::QueryMemory(VAddr address) const { @@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty heap_region_base = map_region_end; heap_region_end = heap_region_base + heap_region_size; + heap_end = heap_region_base; new_map_region_base = heap_region_end; new_map_region_end = new_map_region_base + new_map_region_size; @@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const { return 0xF8000000; } -u64 VMManager::GetTotalHeapUsage() const { - return heap_used; -} - VAddr VMManager::GetAddressSpaceBaseAddress() const { return address_space_base; } @@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const { return heap_region_end - heap_region_base; } +u64 VMManager::GetCurrentHeapSize() const { + return heap_end - heap_region_base; +} + bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), GetHeapRegionEndAddress()); diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 7cdff6094..6f484b7bf 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -380,11 +380,41 @@ public: /// Changes the permissions of a range of addresses, splitting VMAs as necessary. ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); - ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms); - ResultCode HeapFree(VAddr target, u64 size); - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); + /// Attempts to allocate a heap with the given size. + /// + /// @param size The size of the heap to allocate in bytes. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size that is equal to the size of the current heap, + /// then this function will do nothing and return the current + /// heap's starting address, as there's no need to perform + /// any additional heap allocation work. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size less than the current heap's size, then + /// this function will attempt to shrink the heap. + /// + /// @note If a heap is currently allocated, and this is called + /// with a size larger than the current heap's size, then + /// this function will attempt to extend the size of the heap. + /// + /// @returns A result indicating either success or failure. 
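SetHeapSize above replaces HeapAllocate/HeapFree with a single resize-style operation: an equal size is a no-op, a larger size grows the zero-filled backing vector, a smaller one shrinks it, and the block is then remapped at heap_region_base. The backing-storage arithmetic in isolation (the region-size bound check and the MapMemoryBlock/UnmapRange calls are omitted):

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct HeapModel {
        std::uint64_t heap_region_base = 0x2000'0000;  // illustrative base address
        std::uint64_t heap_end = 0x2000'0000;          // base + current size
        std::shared_ptr<std::vector<std::uint8_t>> heap_memory;

        std::uint64_t CurrentSize() const {
            return heap_end - heap_region_base;
        }

        // Returns the heap base (the real function returns ResultVal<VAddr>).
        std::uint64_t SetHeapSize(std::uint64_t size) {
            if (size == CurrentSize()) {
                return heap_region_base;  // nothing to do
            }
            if (!heap_memory) {
                heap_memory = std::make_shared<std::vector<std::uint8_t>>(size);
            } else if (size > CurrentSize()) {
                // Grow: append zero-initialized bytes.
                heap_memory->insert(heap_memory->end(), size - CurrentSize(), 0);
            } else {
                // Shrink: drop the tail and release capacity.
                heap_memory->resize(size);
                heap_memory->shrink_to_fit();
            }
            heap_end = heap_region_base + size;
            return heap_region_base;
        }
    };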
+ /// <p> + /// If successful, this function will return a result + /// containing the starting address to the allocated heap. + /// <p> + /// If unsuccessful, this function will return a result + /// containing an error code. + /// + /// @pre The given size must lie within the allowable heap + /// memory region managed by this VMManager instance. + /// Failure to abide by this will result in ERR_OUT_OF_MEMORY + /// being returned as the result. + /// + ResultVal<VAddr> SetHeapSize(u64 size); + /// Queries the memory manager for information about the given address. /// /// @param address The address to query the memory manager about for information. @@ -418,9 +448,6 @@ public: /// Gets the total memory usage, used by svcGetInfo u64 GetTotalMemoryUsage() const; - /// Gets the total heap usage, used by svcGetInfo - u64 GetTotalHeapUsage() const; - /// Gets the address space base address VAddr GetAddressSpaceBaseAddress() const; @@ -469,6 +496,13 @@ public: /// Gets the total size of the heap region in bytes. u64 GetHeapRegionSize() const; + /// Gets the total size of the current heap in bytes. + /// + /// @note This is the current allocated heap size, not the size + /// of the region it's allowed to exist within. + /// + u64 GetCurrentHeapSize() const; + /// Determines whether or not the specified range is within the heap region. bool IsWithinHeapRegion(VAddr address, u64 size) const; @@ -617,9 +651,6 @@ private: VAddr new_map_region_base = 0; VAddr new_map_region_end = 0; - VAddr main_code_region_base = 0; - VAddr main_code_region_end = 0; - VAddr tls_io_region_base = 0; VAddr tls_io_region_end = 0; @@ -628,9 +659,9 @@ private: // This makes deallocation and reallocation of holes fast and keeps process memory contiguous // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. std::shared_ptr<std::vector<u8>> heap_memory; - // The left/right bounds of the address space covered by heap_memory. - VAddr heap_start = 0; + + // The end of the currently allocated heap. This is not an inclusive + // end of the range. This is essentially 'base_address + current_size'. VAddr heap_end = 0; - u64 heap_used = 0; }; } // namespace Kernel diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp index 770590d0b..2c229bcad 100644 --- a/src/core/hle/service/fatal/fatal.cpp +++ b/src/core/hle/service/fatal/fatal.cpp @@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name) Module::Interface::~Interface() = default; struct FatalInfo { - std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or - // not(find a game which has non zero valeus) - u64_le unk0{}; - u64_le unk1{}; - u64_le unk2{}; - u64_le unk3{}; - u64_le unk4{}; - u64_le unk5{}; - u64_le unk6{}; + enum class Architecture : s32 { + AArch64, + AArch32, + }; + + const char* ArchAsString() const { + return arch == Architecture::AArch64 ? "AArch64" : "AArch32"; + } + + std::array<u64_le, 31> registers{}; + u64_le sp{}; + u64_le pc{}; + u64_le pstate{}; + u64_le afsr0{}; + u64_le afsr1{}; + u64_le esr{}; + u64_le far{}; std::array<u64_le, 32> backtrace{}; - u64_le unk7{}; - u64_le unk8{}; + u64_le program_entry_point{}; + + // Bit flags that indicate which registers have been set with values + // for this context. The service itself uses these to determine which + // registers to specifically print out. 
+ u64_le set_flags{}; + u32_le backtrace_size{}; - u32_le unk9{}; + Architecture arch{}; u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? }; static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); @@ -52,36 +65,36 @@ enum class FatalType : u32 { static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { const auto title_id = Core::CurrentProcess()->GetTitleID(); - std::string crash_report = - fmt::format("Yuzu {}-{} crash report\n" - "Title ID: {:016x}\n" - "Result: 0x{:X} ({:04}-{:04d})\n" - "\n", - Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, - 2000 + static_cast<u32>(error_code.module.Value()), - static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); + std::string crash_report = fmt::format( + "Yuzu {}-{} crash report\n" + "Title ID: {:016x}\n" + "Result: 0x{:X} ({:04}-{:04d})\n" + "Set flags: 0x{:16X}\n" + "Program entry point: 0x{:16X}\n" + "\n", + Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, + 2000 + static_cast<u32>(error_code.module.Value()), + static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point); if (info.backtrace_size != 0x0) { crash_report += "Registers:\n"; - // TODO(ogniK): This is just a guess, find a game which actually has non zero values for (size_t i = 0; i < info.registers.size(); i++) { crash_report += fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); } - crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); - crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); - crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); - crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); - crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); - crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); - crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); + crash_report += fmt::format(" SP: {:016x}\n", info.sp); + crash_report += fmt::format(" PC: {:016x}\n", info.pc); + crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate); + crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0); + crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1); + crash_report += fmt::format(" ESR: {:016x}\n", info.esr); + crash_report += fmt::format(" FAR: {:016x}\n", info.far); crash_report += "\nBacktrace:\n"; for (size_t i = 0; i < info.backtrace_size; i++) { crash_report += fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); } - crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); - crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); - crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9); + + crash_report += fmt::format("Architecture: {}\n", info.ArchAsString()); crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); } @@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F case FatalType::ErrorReport: GenerateErrorReport(error_code, info); break; - }; + } } void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp{ctx}; - auto error_code = rp.Pop<ResultCode>(); + const auto error_code = rp.Pop<ResultCode>(); ThrowFatalError(error_code, FatalType::ErrorScreen, {}); IPC::ResponseBuilder rb{ctx, 2}; @@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { void 
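The reworked FatalInfo gives the previously unknown fields their real names; set_flags is a bitmask describing which registers the guest actually filled in. The report above prints every register regardless, but one plausible use of the mask looks like this (hypothetical helper, not part of the diff):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Print only the general-purpose registers whose bit is set in set_flags.
    void DumpSetRegisters(const std::array<std::uint64_t, 31>& registers,
                          std::uint64_t set_flags) {
        for (std::size_t i = 0; i < registers.size(); ++i) {
            if ((set_flags >> i) & 1) {
                std::printf(" X[%02zu]: %016llx\n", i,
                            static_cast<unsigned long long>(registers[i]));
            }
        }
    }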
Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp(ctx); - auto error_code = rp.Pop<ResultCode>(); - auto fatal_type = rp.PopEnum<FatalType>(); + const auto error_code = rp.Pop<ResultCode>(); + const auto fatal_type = rp.PopEnum<FatalType>(); ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy IPC::ResponseBuilder rb{ctx, 2}; @@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { LOG_ERROR(Service_Fatal, "called"); IPC::RequestParser rp(ctx); - auto error_code = rp.Pop<ResultCode>(); - auto fatal_type = rp.PopEnum<FatalType>(); - auto fatal_info = ctx.ReadBuffer(); + const auto error_code = rp.Pop<ResultCode>(); + const auto fatal_type = rp.PopEnum<FatalType>(); + const auto fatal_info = ctx.ReadBuffer(); FatalInfo info{}; ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 8a6de83a2..63b55758b 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -36,9 +36,9 @@ namespace Service::HID { // Updating period for each HID device. // TODO(ogniK): Find actual polling rate of hid -constexpr u64 pad_update_ticks = Core::Timing::BASE_CLOCK_RATE / 66; -constexpr u64 accelerometer_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; -constexpr u64 gyroscope_update_ticks = Core::Timing::BASE_CLOCK_RATE / 100; +constexpr s64 pad_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 66); +constexpr s64 accelerometer_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100); +constexpr s64 gyroscope_update_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 100); constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000; IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { @@ -75,7 +75,7 @@ IAppletResource::IAppletResource() : ServiceFramework("IAppletResource") { // Register update callbacks auto& core_timing = Core::System::GetInstance().CoreTiming(); pad_update_event = - core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, int cycles_late) { + core_timing.RegisterEvent("HID::UpdatePadCallback", [this](u64 userdata, s64 cycles_late) { UpdateControllers(userdata, cycles_late); }); @@ -106,7 +106,7 @@ void IAppletResource::GetSharedMemoryHandle(Kernel::HLERequestContext& ctx) { rb.PushCopyObjects(shared_mem); } -void IAppletResource::UpdateControllers(u64 userdata, int cycles_late) { +void IAppletResource::UpdateControllers(u64 userdata, s64 cycles_late) { auto& core_timing = Core::System::GetInstance().CoreTiming(); const bool should_reload = Settings::values.is_device_reload_pending.exchange(false); diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index 498602de5..d3660cad2 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -65,7 +65,7 @@ private: } void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx); - void UpdateControllers(u64 userdata, int cycles_late); + void UpdateControllers(u64 userdata, s64 cycles_late); Kernel::SharedPtr<Kernel::SharedMemory> shared_mem; diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 1c4482e47..c6babdd4d 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -335,7 +335,7 @@ void 
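The HID and NVFlinger tick constants become s64 because the reschedule subtracts the signed lateness from them, as in frame_ticks - cycles_late above. In isolation (BASE_CLOCK_RATE assumed here to be the ~1.02 GHz figure yuzu's core timing uses):

    #include <cstdint>

    constexpr std::int64_t BASE_CLOCK_RATE = 1019215872;  // assumed ~1.020 GHz
    constexpr std::int64_t frame_ticks = BASE_CLOCK_RATE / 60;

    // Delay until the next composition event; if this one ran late, the next is
    // scheduled correspondingly sooner. Keeping everything signed avoids
    // surprises should cycles_late ever exceed frame_ticks.
    constexpr std::int64_t NextCompositionDelay(std::int64_t cycles_late) {
        return frame_ticks - cycles_late;
    }

    static_assert(NextCompositionDelay(0) == frame_ticks);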
Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) { } bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { - std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); + std::lock_guard lock{HLE::g_hle_lock}; if (buffer.size() < sizeof(AmiiboFile)) { return false; } diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index fc496b654..c7f5bbf28 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -26,7 +26,7 @@ namespace Service::NVFlinger { constexpr std::size_t SCREEN_REFRESH_RATE = 60; -constexpr u64 frame_ticks = static_cast<u64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); +constexpr s64 frame_ticks = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_timing} { displays.emplace_back(0, "Default"); @@ -37,7 +37,7 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t // Schedule the screen composition events composition_event = - core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, int cycles_late) { + core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, s64 cycles_late) { Compose(); this->core_timing.ScheduleEvent(frame_ticks - cycles_late, composition_event); }); diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp index c9b4da5b0..ecee554bf 100644 --- a/src/core/hle/service/set/set_sys.cpp +++ b/src/core/hle/service/set/set_sys.cpp @@ -2,13 +2,88 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/assert.h" #include "common/logging/log.h" +#include "core/file_sys/errors.h" +#include "core/file_sys/system_archive/system_version.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" +#include "core/hle/service/filesystem/filesystem.h" #include "core/hle/service/set/set_sys.h" namespace Service::Set { +namespace { +constexpr u64 SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET = 0x05; + +enum class GetFirmwareVersionType { + Version1, + Version2, +}; + +void GetFirmwareVersionImpl(Kernel::HLERequestContext& ctx, GetFirmwareVersionType type) { + LOG_WARNING(Service_SET, "called - Using hardcoded firmware version '{}'", + FileSys::SystemArchive::GetLongDisplayVersion()); + + ASSERT_MSG(ctx.GetWriteBufferSize() == 0x100, + "FirmwareVersion output buffer must be 0x100 bytes in size!"); + + // Instead of using the normal procedure of checking for the real system archive and if it + // doesn't exist, synthesizing one, I feel that that would lead to strange bugs because a + // user is using a really old or really new SystemVersion title.
The synthesized one ensures + // consistency (currently reports as 5.1.0-0.0) + const auto archive = FileSys::SystemArchive::SystemVersion(); + + const auto early_exit_failure = [&ctx](const std::string& desc, ResultCode code) { + LOG_ERROR(Service_SET, "General failure while attempting to resolve firmware version ({}).", + desc.c_str()); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(code); + }; + + if (archive == nullptr) { + early_exit_failure("The system version archive couldn't be synthesized.", + FileSys::ERROR_FAILED_MOUNT_ARCHIVE); + return; + } + + const auto ver_file = archive->GetFile("file"); + if (ver_file == nullptr) { + early_exit_failure("The system version archive didn't contain the file 'file'.", + FileSys::ERROR_INVALID_ARGUMENT); + return; + } + + auto data = ver_file->ReadAllBytes(); + if (data.size() != 0x100) { + early_exit_failure("The system version file 'file' was not the correct size.", + FileSys::ERROR_OUT_OF_BOUNDS); + return; + } + + // If the command is GetFirmwareVersion (as opposed to GetFirmwareVersion2), hardware will + // zero out the REVISION_MINOR field. + if (type == GetFirmwareVersionType::Version1) { + data[SYSTEM_VERSION_FILE_MINOR_REVISION_OFFSET] = 0; + } + + ctx.WriteBuffer(data); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} +} // Anonymous namespace + +void SET_SYS::GetFirmwareVersion(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_SET, "called"); + GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version1); +} + +void SET_SYS::GetFirmwareVersion2(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_SET, "called"); + GetFirmwareVersionImpl(ctx, GetFirmwareVersionType::Version2); +} + void SET_SYS::GetColorSetId(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_SET, "called"); @@ -33,8 +108,8 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") { {0, nullptr, "SetLanguageCode"}, {1, nullptr, "SetNetworkSettings"}, {2, nullptr, "GetNetworkSettings"}, - {3, nullptr, "GetFirmwareVersion"}, - {4, nullptr, "GetFirmwareVersion2"}, + {3, &SET_SYS::GetFirmwareVersion, "GetFirmwareVersion"}, + {4, &SET_SYS::GetFirmwareVersion2, "GetFirmwareVersion2"}, {5, nullptr, "GetFirmwareVersionDigest"}, {7, nullptr, "GetLockScreenFlag"}, {8, nullptr, "SetLockScreenFlag"}, diff --git a/src/core/hle/service/set/set_sys.h b/src/core/hle/service/set/set_sys.h index f602f3c77..13ee2cf46 100644 --- a/src/core/hle/service/set/set_sys.h +++ b/src/core/hle/service/set/set_sys.h @@ -20,6 +20,8 @@ private: BasicBlack = 1, }; + void GetFirmwareVersion(Kernel::HLERequestContext& ctx); + void GetFirmwareVersion2(Kernel::HLERequestContext& ctx); void GetColorSetId(Kernel::HLERequestContext& ctx); void SetColorSetId(Kernel::HLERequestContext& ctx); diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 7494f8a28..714d85a59 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -21,36 +21,8 @@ #include "core/settings.h" namespace Loader { - -struct NsoSegmentHeader { - u32_le offset; - u32_le location; - u32_le size; - union { - u32_le alignment; - u32_le bss_size; - }; -}; -static_assert(sizeof(NsoSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size."); - -struct NsoHeader { - u32_le magic; - u32_le version; - INSERT_PADDING_WORDS(1); - u8 flags; - std::array<NsoSegmentHeader, 3> segments; // Text, RoData, Data (in that order) - std::array<u8, 0x20> build_id; - std::array<u32_le, 3> segments_compressed_size; - - bool IsSegmentCompressed(size_t segment_num) const { - ASSERT_MSG(segment_num < 3, "Invalid segment
{}", segment_num); - return ((flags >> segment_num) & 1); - } -}; -static_assert(sizeof(NsoHeader) == 0x6c, "NsoHeader has incorrect size."); -static_assert(std::is_trivially_copyable_v<NsoHeader>, "NsoHeader isn't trivially copyable."); - -struct ModHeader { +namespace { +struct MODHeader { u32_le magic; u32_le dynamic_offset; u32_le bss_start_offset; @@ -59,25 +31,10 @@ struct ModHeader { u32_le eh_frame_hdr_end_offset; u32_le module_offset; // Offset to runtime-generated module object. typically equal to .bss base }; -static_assert(sizeof(ModHeader) == 0x1c, "ModHeader has incorrect size."); - -AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {} - -FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { - u32 magic = 0; - if (file->ReadObject(&magic) != sizeof(magic)) { - return FileType::Error; - } - - if (Common::MakeMagic('N', 'S', 'O', '0') != magic) { - return FileType::Error; - } - - return FileType::NSO; -} +static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size."); -static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, - const NsoSegmentHeader& header) { +std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, + const NSOSegmentHeader& header) { std::vector<u8> uncompressed_data(header.size); const int bytes_uncompressed = LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()), @@ -91,23 +48,47 @@ static std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, return uncompressed_data; } -static constexpr u32 PageAlignSize(u32 size) { +constexpr u32 PageAlignSize(u32 size) { return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; } +} // Anonymous namespace + +bool NSOHeader::IsSegmentCompressed(size_t segment_num) const { + ASSERT_MSG(segment_num < 3, "Invalid segment {}", segment_num); + return ((flags >> segment_num) & 1) != 0; +} + +AppLoader_NSO::AppLoader_NSO(FileSys::VirtualFile file) : AppLoader(std::move(file)) {} + +FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { + u32 magic = 0; + if (file->ReadObject(&magic) != sizeof(magic)) { + return FileType::Error; + } + + if (Common::MakeMagic('N', 'S', 'O', '0') != magic) { + return FileType::Error; + } + + return FileType::NSO; +} std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base, bool should_pass_arguments, std::optional<FileSys::PatchManager> pm) { - if (file.GetSize() < sizeof(NsoHeader)) + if (file.GetSize() < sizeof(NSOHeader)) { return {}; + } - NsoHeader nso_header{}; - if (sizeof(NsoHeader) != file.ReadObject(&nso_header)) + NSOHeader nso_header{}; + if (sizeof(NSOHeader) != file.ReadObject(&nso_header)) { return {}; + } - if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) + if (nso_header.magic != Common::MakeMagic('N', 'S', 'O', '0')) { return {}; + } // Build program image Kernel::CodeSet codeset; @@ -143,10 +124,10 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); // Read MOD header - ModHeader mod_header{}; + MODHeader mod_header{}; // Default .bss to size in segment header if MOD0 section doesn't exist u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; - std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(ModHeader)); + std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader)); const bool 
has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; if (has_mod_header) { // Resize program image to include .bss section and page align each section @@ -158,13 +139,15 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, // Apply patches if necessary if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { - std::vector<u8> pi_header(program_image.size() + 0x100); - std::memcpy(pi_header.data(), &nso_header, sizeof(NsoHeader)); - std::memcpy(pi_header.data() + 0x100, program_image.data(), program_image.size()); + std::vector<u8> pi_header(sizeof(NSOHeader) + program_image.size()); + pi_header.insert(pi_header.begin(), reinterpret_cast<u8*>(&nso_header), + reinterpret_cast<u8*>(&nso_header) + sizeof(NSOHeader)); + pi_header.insert(pi_header.begin() + sizeof(NSOHeader), program_image.begin(), + program_image.end()); pi_header = pm->PatchNSO(pi_header); - std::memcpy(program_image.data(), pi_header.data() + 0x100, program_image.size()); + std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.begin()); } // Apply cheats if they exist and the program has a valid title ID diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h index 167c8a694..4674c3724 100644 --- a/src/core/loader/nso.h +++ b/src/core/loader/nso.h @@ -4,7 +4,9 @@ #pragma once +#include <array> #include <optional> +#include <type_traits> #include "common/common_types.h" #include "common/swap.h" #include "core/file_sys/patch_manager.h" @@ -16,6 +18,43 @@ class Process; namespace Loader { +struct NSOSegmentHeader { + u32_le offset; + u32_le location; + u32_le size; + union { + u32_le alignment; + u32_le bss_size; + }; +}; +static_assert(sizeof(NSOSegmentHeader) == 0x10, "NsoSegmentHeader has incorrect size."); + +struct NSOHeader { + using SHA256Hash = std::array<u8, 0x20>; + + struct RODataRelativeExtent { + u32_le data_offset; + u32_le size; + }; + + u32_le magic; + u32_le version; + u32 reserved; + u32_le flags; + std::array<NSOSegmentHeader, 3> segments; // Text, RoData, Data (in that order) + std::array<u8, 0x20> build_id; + std::array<u32_le, 3> segments_compressed_size; + std::array<u8, 0x1C> padding; + RODataRelativeExtent api_info_extent; + RODataRelativeExtent dynstr_extent; + RODataRelativeExtent dynsyn_extent; + std::array<SHA256Hash, 3> segment_hashes; + + bool IsSegmentCompressed(size_t segment_num) const; +}; +static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size."); +static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable."); + constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; struct NSOArgumentHeader { diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp index c716a462b..4afd6c8a3 100644 --- a/src/core/perf_stats.cpp +++ b/src/core/perf_stats.cpp @@ -18,13 +18,13 @@ using std::chrono::microseconds; namespace Core { void PerfStats::BeginSystemFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; frame_begin = Clock::now(); } void PerfStats::EndSystemFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; auto frame_end = Clock::now(); accumulated_frametime += frame_end - frame_begin; @@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() { } void PerfStats::EndGameFrame() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; game_frames += 1; } PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { 
- std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; const auto now = Clock::now(); // Walltime elapsed since stats were reset @@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us } double PerfStats::GetLastFrameTimeScale() { - std::lock_guard<std::mutex> lock(object_mutex); + std::lock_guard lock{object_mutex}; constexpr double FRAME_LENGTH = 1.0 / 60; return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
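
Several hunks above (core_timing.h, hid.cpp, nvflinger.cpp) widen the TimedCallback lateness parameter from int to s64 and reschedule periodic work with expressions like frame_ticks - cycles_late. Below is a minimal standalone sketch of that reschedule pattern; it is not yuzu's actual CoreTiming API, and the names TinyTiming and Event are illustrative only. The point it demonstrates is that a signed lateness value keeps the period - cycles_late subtraction well-defined, and that scheduling the next occurrence relative to the ideal deadline prevents drift from accumulating.

// Standalone illustration (not yuzu's CoreTiming) of a periodic event that
// compensates for how late it fired, mirroring the s64 cycles_late signature.
#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

using s64 = std::int64_t;
using u64 = std::uint64_t;

struct Event {
    s64 time;                               // absolute cycle count at which the event is due
    std::function<void(u64, s64)> callback; // (userdata, cycles_late)
    u64 userdata;
    bool operator>(const Event& other) const {
        return time > other.time;
    }
};

class TinyTiming {
public:
    void ScheduleEvent(s64 cycles_into_future, std::function<void(u64, s64)> cb, u64 userdata = 0) {
        queue.push(Event{current_time + cycles_into_future, std::move(cb), userdata});
    }

    void Advance(s64 cycles) {
        current_time += cycles;
        while (!queue.empty() && queue.top().time <= current_time) {
            Event evt = queue.top();
            queue.pop();
            // Lateness is signed, matching the widened TimedCallback parameter.
            evt.callback(evt.userdata, current_time - evt.time);
        }
    }

private:
    s64 current_time = 0;
    std::priority_queue<Event, std::vector<Event>, std::greater<>> queue;
};

int main() {
    constexpr s64 frame_ticks = 1000; // stand-in for BASE_CLOCK_RATE / SCREEN_REFRESH_RATE

    TinyTiming timing;
    std::function<void(u64, s64)> compose = [&](u64 userdata, s64 cycles_late) {
        // ... compose the frame here ...
        // Reschedule relative to the ideal deadline, as the NVFlinger hunk does.
        timing.ScheduleEvent(frame_ticks - cycles_late, compose, userdata);
    };

    timing.ScheduleEvent(frame_ticks, compose);
    timing.Advance(1024); // fires 24 cycles late; the next event still lands at cycle 2000
    timing.Advance(1000); // fires the cycle-2000 event
    return 0;
}

Because each callback subtracts its own lateness when rescheduling, the event stream stays locked to multiples of the period instead of sliding later every frame; the signed type simply makes that arithmetic safe if lateness ever exceeds the period.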