196 files changed, 7740 insertions, 4366 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 467d769a2..c906c5a50 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -164,7 +164,7 @@ if (ENABLE_SDL2) set(SDL2_LIBRARIES "SDL2::SDL2") endif() - include_directories(${SDL2_INCLUDE_DIRS}) + include_directories(SYSTEM ${SDL2_INCLUDE_DIRS}) add_library(SDL2 INTERFACE) target_link_libraries(SDL2 INTERFACE "${SDL2_LIBRARIES}") endif() diff --git a/externals/CMakeLists.txt b/externals/CMakeLists.txt index d4421f697..653c64f47 100644 --- a/externals/CMakeLists.txt +++ b/externals/CMakeLists.txt @@ -16,11 +16,11 @@ add_library(catch-single-include INTERFACE) target_include_directories(catch-single-include INTERFACE catch/single_include) # libfmt -pkg_check_modules(FMT IMPORTED_TARGET GLOBAL fmt>=6.1.0) +pkg_check_modules(FMT IMPORTED_TARGET GLOBAL fmt>=6.2.0) if (FMT_FOUND) add_library(fmt::fmt ALIAS PkgConfig::FMT) else() - message(STATUS "fmt 6.1.0 or newer not found, falling back to externals") + message(STATUS "fmt 6.2.0 or newer not found, falling back to externals") add_subdirectory(fmt) add_library(fmt::fmt ALIAS fmt) endif() diff --git a/externals/dynarmic b/externals/dynarmic -Subproject f6ae9e1c3311b747b7b91fd903c62bf40b3b9c8 +Subproject a3cd05577c9b6c51f0f345d0e915b6feab68fe1 diff --git a/externals/fmt b/externals/fmt -Subproject 4b8f8fac96a7819f28f4be523ca10a2d5d8aaaf +Subproject 9bdd1596cef1b57b9556f8bef32dc4a32322ef3 diff --git a/externals/opus/CMakeLists.txt b/externals/opus/CMakeLists.txt index cbb393272..94a86551f 100644 --- a/externals/opus/CMakeLists.txt +++ b/externals/opus/CMakeLists.txt @@ -203,7 +203,11 @@ endif() target_compile_definitions(opus PRIVATE OPUS_BUILD ENABLE_HARDENING) if(NOT MSVC) - target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=2) + if(MINGW) + target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=0) + else() + target_compile_definitions(opus PRIVATE _FORTIFY_SOURCE=2) + endif() endif() # It is strongly recommended to uncomment one of these VAR_ARRAYS: Use C99 diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt index c381dbe1d..5ef38a337 100644 --- a/src/audio_core/CMakeLists.txt +++ b/src/audio_core/CMakeLists.txt @@ -7,9 +7,12 @@ add_library(audio_core STATIC audio_out.h audio_renderer.cpp audio_renderer.h + behavior_info.cpp + behavior_info.h buffer.h codec.cpp codec.h + common.h null_sink.h sink.h sink_details.cpp diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp index c187d8ac5..d18ef6940 100644 --- a/src/audio_core/audio_renderer.cpp +++ b/src/audio_core/audio_renderer.cpp @@ -6,6 +6,7 @@ #include "audio_core/audio_out.h" #include "audio_core/audio_renderer.h" #include "audio_core/codec.h" +#include "audio_core/common.h" #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" @@ -36,9 +37,9 @@ public: } void SetWaveIndex(std::size_t index); - std::vector<s16> DequeueSamples(std::size_t sample_count, Memory::Memory& memory); + std::vector<s16> DequeueSamples(std::size_t sample_count, Core::Memory::Memory& memory); void UpdateState(); - void RefreshBuffer(Memory::Memory& memory); + void RefreshBuffer(Core::Memory::Memory& memory); private: bool is_in_use{}; @@ -66,19 +67,20 @@ public: return info; } - void UpdateState(Memory::Memory& memory); + void UpdateState(Core::Memory::Memory& memory); private: EffectOutStatus out_status{}; EffectInStatus info{}; }; -AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + +AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& 
core_timing, Core::Memory::Memory& memory_, AudioRendererParameter params, std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number) : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count), effects(params.effect_count), memory{memory_} { - + behavior_info.SetUserRevision(params.revision); audio_out = std::make_unique<AudioCore::AudioOut>(); stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS, fmt::format("AudioRenderer-Instance{}", instance_number), @@ -108,17 +110,17 @@ Stream::State AudioRenderer::GetStreamState() const { return stream->GetState(); } -static constexpr u32 VersionFromRevision(u32_le rev) { - // "REV7" -> 7 - return ((rev >> 24) & 0xff) - 0x30; -} - -std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) { +ResultVal<std::vector<u8>> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) { // Copy UpdateDataHeader struct UpdateDataHeader config{}; std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader)); u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4); + if (!behavior_info.UpdateInput(input_params, sizeof(UpdateDataHeader))) { + LOG_ERROR(Audio, "Failed to update behavior info input parameters"); + return Audren::ERR_INVALID_PARAMETERS; + } + // Copy MemoryPoolInfo structs std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count); std::memcpy(mem_pool_info.data(), @@ -172,8 +174,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_ // Copy output header UpdateDataHeader response_data{worker_params}; std::vector<u8> output_params(response_data.total_size); - const auto audren_revision = VersionFromRevision(config.revision); - if (audren_revision >= 5) { + if (behavior_info.IsElapsedFrameCountSupported()) { response_data.frame_count = 0x10; response_data.total_size += 0x10; } @@ -199,7 +200,19 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_ sizeof(EffectOutStatus)); effect_out_status_offset += sizeof(EffectOutStatus); } - return output_params; + + // Update behavior info output + const std::size_t behavior_out_status_offset{ + sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size + + response_data.effects_size + response_data.sinks_size + + response_data.performance_manager_size}; + + if (!behavior_info.UpdateOutput(output_params, behavior_out_status_offset)) { + LOG_ERROR(Audio, "Failed to update behavior info output parameters"); + return Audren::ERR_INVALID_PARAMETERS; + } + + return MakeResult(output_params); } void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) { @@ -208,7 +221,7 @@ void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) { } std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count, - Memory::Memory& memory) { + Core::Memory::Memory& memory) { if (!IsPlaying()) { return {}; } @@ -258,7 +271,7 @@ void AudioRenderer::VoiceState::UpdateState() { is_in_use = info.is_in_use; } -void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) { +void AudioRenderer::VoiceState::RefreshBuffer(Core::Memory::Memory& memory) { const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr; const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz; std::vector<s16> new_samples(wave_buffer_size / sizeof(s16)); @@ -310,7 +323,7 @@ void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) { 
is_refresh_pending = false; } -void AudioRenderer::EffectState::UpdateState(Memory::Memory& memory) { +void AudioRenderer::EffectState::UpdateState(Core::Memory::Memory& memory) { if (info.is_new) { out_status.state = EffectStatus::New; } else { diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h index c0fae669e..b42770fae 100644 --- a/src/audio_core/audio_renderer.h +++ b/src/audio_core/audio_renderer.h @@ -8,11 +8,13 @@ #include <memory> #include <vector> +#include "audio_core/behavior_info.h" #include "audio_core/stream.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "common/swap.h" #include "core/hle/kernel/object.h" +#include "core/hle/result.h" namespace Core::Timing { class CoreTiming; @@ -22,7 +24,7 @@ namespace Kernel { class WritableEvent; } -namespace Memory { +namespace Core::Memory { class Memory; } @@ -221,12 +223,12 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size class AudioRenderer { public: - AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_, AudioRendererParameter params, std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number); ~AudioRenderer(); - std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params); + ResultVal<std::vector<u8>> UpdateAudioRenderer(const std::vector<u8>& input_params); void QueueMixedBuffer(Buffer::Tag tag); void ReleaseAndQueueBuffers(); u32 GetSampleRate() const; @@ -237,6 +239,7 @@ public: private: class EffectState; class VoiceState; + BehaviorInfo behavior_info{}; AudioRendererParameter worker_params; std::shared_ptr<Kernel::WritableEvent> buffer_event; @@ -244,7 +247,7 @@ private: std::vector<EffectState> effects; std::unique_ptr<AudioOut> audio_out; StreamPtr stream; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace AudioCore diff --git a/src/audio_core/behavior_info.cpp b/src/audio_core/behavior_info.cpp new file mode 100644 index 000000000..94b7a3bf1 --- /dev/null +++ b/src/audio_core/behavior_info.cpp @@ -0,0 +1,100 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+
+#include <cstring>
+#include "audio_core/behavior_info.h"
+#include "audio_core/common.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+BehaviorInfo::BehaviorInfo() : process_revision(CURRENT_PROCESS_REVISION) {}
+BehaviorInfo::~BehaviorInfo() = default;
+
+bool BehaviorInfo::UpdateInput(const std::vector<u8>& buffer, std::size_t offset) {
+    if (!CanConsumeBuffer(buffer.size(), offset, sizeof(InParams))) {
+        LOG_ERROR(Audio, "Buffer is an invalid size!");
+        return false;
+    }
+    InParams params{};
+    std::memcpy(&params, buffer.data() + offset, sizeof(InParams));
+
+    if (!IsValidRevision(params.revision)) {
+        LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", params.revision);
+        return false;
+    }
+
+    if (user_revision != params.revision) {
+        LOG_ERROR(Audio,
+                  "User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
+                  user_revision, params.revision);
+        return false;
+    }
+
+    ClearError();
+    UpdateFlags(params.flags);
+
+    // TODO(ogniK): Check input params size when InfoUpdater is used
+
+    return true;
+}
+
+bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
+    if (!CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
+        LOG_ERROR(Audio, "Buffer is an invalid size!");
+        return false;
+    }
+
+    OutParams params{};
+    std::memcpy(params.errors.data(), errors.data(), sizeof(ErrorInfo) * errors.size());
+    params.error_count = static_cast<u32_le>(error_count);
+    std::memcpy(buffer.data() + offset, &params, sizeof(OutParams));
+    return true;
+}
+
+void BehaviorInfo::ClearError() {
+    error_count = 0;
+}
+
+void BehaviorInfo::UpdateFlags(u64_le dest_flags) {
+    flags = dest_flags;
+}
+
+void BehaviorInfo::SetUserRevision(u32_le revision) {
+    user_revision = revision;
+}
+
+bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
+    return IsRevisionSupported(2, user_revision);
+}
+
+bool BehaviorInfo::IsSplitterSupported() const {
+    return IsRevisionSupported(2, user_revision);
+}
+
+bool BehaviorInfo::IsLongSizePreDelaySupported() const {
+    return IsRevisionSupported(3, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit80PercentSupported() const {
+    return IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit75PercentSupported() const {
+    return IsRevisionSupported(4, user_revision);
+}
+
+bool BehaviorInfo::IsAudioRenererProcessingTimeLimit70PercentSupported() const {
+    return IsRevisionSupported(1, user_revision);
+}
+
+bool BehaviorInfo::IsElapsedFrameCountSupported() const {
+    return IsRevisionSupported(5, user_revision);
+}
+
+bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
+    return (flags & 1) != 0;
+}
+
+} // namespace AudioCore
diff --git a/src/audio_core/behavior_info.h b/src/audio_core/behavior_info.h
new file mode 100644
index 000000000..c5e91ab39
--- /dev/null
+++ b/src/audio_core/behavior_info.h
@@ -0,0 +1,66 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+ +#pragma once + +#include <array> + +#include <vector> +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/swap.h" + +namespace AudioCore { +class BehaviorInfo { +public: + explicit BehaviorInfo(); + ~BehaviorInfo(); + + bool UpdateInput(const std::vector<u8>& buffer, std::size_t offset); + bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset); + + void ClearError(); + void UpdateFlags(u64_le dest_flags); + void SetUserRevision(u32_le revision); + + bool IsAdpcmLoopContextBugFixed() const; + bool IsSplitterSupported() const; + bool IsLongSizePreDelaySupported() const; + bool IsAudioRenererProcessingTimeLimit80PercentSupported() const; + bool IsAudioRenererProcessingTimeLimit75PercentSupported() const; + bool IsAudioRenererProcessingTimeLimit70PercentSupported() const; + bool IsElapsedFrameCountSupported() const; + bool IsMemoryPoolForceMappingEnabled() const; + +private: + u32_le process_revision{}; + u32_le user_revision{}; + u64_le flags{}; + + struct ErrorInfo { + u32_le result{}; + INSERT_PADDING_WORDS(1); + u64_le result_info{}; + }; + static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size"); + + std::array<ErrorInfo, 10> errors{}; + std::size_t error_count{}; + + struct InParams { + u32_le revision{}; + u32_le padding{}; + u64_le flags{}; + }; + static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size"); + + struct OutParams { + std::array<ErrorInfo, 10> errors{}; + u32_le error_count{}; + INSERT_PADDING_BYTES(12); + }; + static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size"); +}; + +} // namespace AudioCore diff --git a/src/audio_core/common.h b/src/audio_core/common.h new file mode 100644 index 000000000..98478b66b --- /dev/null +++ b/src/audio_core/common.h @@ -0,0 +1,47 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/swap.h" +#include "core/hle/result.h" + +namespace AudioCore { +namespace Audren { +constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41}; +} + +constexpr u32_le CURRENT_PROCESS_REVISION = Common::MakeMagic('R', 'E', 'V', '8'); + +static constexpr u32 VersionFromRevision(u32_le rev) { + // "REV7" -> 7 + return ((rev >> 24) & 0xff) - 0x30; +} + +static constexpr bool IsRevisionSupported(u32 required, u32_le user_revision) { + const auto base = VersionFromRevision(user_revision); + return required <= base; +} + +static constexpr bool IsValidRevision(u32_le revision) { + const auto base = VersionFromRevision(revision); + constexpr auto max_rev = VersionFromRevision(CURRENT_PROCESS_REVISION); + return base <= max_rev; +} + +static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std::size_t required) { + if (offset > size) { + return false; + } + if (size < required) { + return false; + } + if ((size - offset) < required) { + return false; + } + return true; +} + +} // namespace AudioCore diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index eeceaa655..6ffc612e7 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -155,6 +155,8 @@ add_library(common STATIC uuid.cpp uuid.h vector_math.h + virtual_buffer.cpp + virtual_buffer.h web_result.h zstd_compression.cpp zstd_compression.h diff --git a/src/common/alignment.h b/src/common/alignment.h index cdd4833f8..f8c49e079 100644 --- a/src/common/alignment.h +++ b/src/common/alignment.h @@ -38,6 +38,13 @@ constexpr bool IsWordAligned(T value) { return (value & 0b11) == 0; } +template <typename T> +constexpr bool IsAligned(T value, std::size_t alignment) { + using U = typename std::make_unsigned<T>::type; + const U mask = static_cast<U>(alignment - 1); + return (value & mask) == 0; +} + template <typename T, std::size_t Align = 16> class AlignmentAllocator { public: diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h index 052254678..88cf5250a 100644 --- a/src/common/common_funcs.h +++ b/src/common/common_funcs.h @@ -55,6 +55,38 @@ __declspec(dllimport) void __stdcall DebugBreak(void); // Defined in Misc.cpp. 
std::string GetLastErrorMsg(); +#define DECLARE_ENUM_FLAG_OPERATORS(type) \ + constexpr type operator|(type a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(static_cast<T>(a) | static_cast<T>(b)); \ + } \ + constexpr type operator&(type a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(static_cast<T>(a) & static_cast<T>(b)); \ + } \ + constexpr type& operator|=(type& a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + a = static_cast<type>(static_cast<T>(a) | static_cast<T>(b)); \ + return a; \ + } \ + constexpr type& operator&=(type& a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + a = static_cast<type>(static_cast<T>(a) & static_cast<T>(b)); \ + return a; \ + } \ + constexpr type operator~(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(~static_cast<T>(key)); \ + } \ + constexpr bool True(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<T>(key) != 0; \ + } \ + constexpr bool False(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<T>(key) == 0; \ + } + namespace Common { constexpr u32 MakeMagic(char a, char b, char c, char d) { diff --git a/src/common/file_util.cpp b/src/common/file_util.cpp index 35eee0096..45b750e1e 100644 --- a/src/common/file_util.cpp +++ b/src/common/file_util.cpp @@ -888,7 +888,14 @@ std::string SanitizePath(std::string_view path_, DirectorySeparator directory_se } std::replace(path.begin(), path.end(), type1, type2); - path.erase(std::unique(path.begin(), path.end(), + + auto start = path.begin(); +#ifdef _WIN32 + // allow network paths which start with a double backslash (e.g. 
\\server\share) + if (start != path.end()) + ++start; +#endif + path.erase(std::unique(start, path.end(), [type2](char c1, char c2) { return c1 == type2 && c2 == type2; }), path.end()); return std::string(RemoveTrailingSlash(path)); @@ -967,6 +974,34 @@ bool IOFile::Flush() { return IsOpen() && 0 == std::fflush(m_file); } +std::size_t IOFile::ReadImpl(void* data, std::size_t length, std::size_t data_size) const { + if (!IsOpen()) { + return std::numeric_limits<std::size_t>::max(); + } + + if (length == 0) { + return 0; + } + + DEBUG_ASSERT(data != nullptr); + + return std::fread(data, data_size, length, m_file); +} + +std::size_t IOFile::WriteImpl(const void* data, std::size_t length, std::size_t data_size) { + if (!IsOpen()) { + return std::numeric_limits<std::size_t>::max(); + } + + if (length == 0) { + return 0; + } + + DEBUG_ASSERT(data != nullptr); + + return std::fwrite(data, data_size, length, m_file); +} + bool IOFile::Resize(u64 size) { return IsOpen() && 0 == #ifdef _WIN32 diff --git a/src/common/file_util.h b/src/common/file_util.h index cde7ddf2d..f7a0c33fa 100644 --- a/src/common/file_util.h +++ b/src/common/file_util.h @@ -222,22 +222,15 @@ public: static_assert(std::is_trivially_copyable_v<T>, "Given array does not consist of trivially copyable objects"); - if (!IsOpen()) { - return std::numeric_limits<std::size_t>::max(); - } - - return std::fread(data, sizeof(T), length, m_file); + return ReadImpl(data, length, sizeof(T)); } template <typename T> std::size_t WriteArray(const T* data, std::size_t length) { static_assert(std::is_trivially_copyable_v<T>, "Given array does not consist of trivially copyable objects"); - if (!IsOpen()) { - return std::numeric_limits<std::size_t>::max(); - } - return std::fwrite(data, sizeof(T), length, m_file); + return WriteImpl(data, length, sizeof(T)); } template <typename T> @@ -278,6 +271,9 @@ public: } private: + std::size_t ReadImpl(void* data, std::size_t length, std::size_t data_size) const; + std::size_t WriteImpl(const void* data, std::size_t length, std::size_t data_size); + std::FILE* m_file = nullptr; }; diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp index 566b57b62..e5d3090d5 100644 --- a/src/common/page_table.cpp +++ b/src/common/page_table.cpp @@ -6,36 +6,20 @@ namespace Common { -PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {} +PageTable::PageTable() = default; PageTable::~PageTable() = default; -void PageTable::Resize(std::size_t address_space_width_in_bits) { - const std::size_t num_page_table_entries = 1ULL - << (address_space_width_in_bits - page_size_in_bits); - +void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits, + bool has_attribute) { + const std::size_t num_page_table_entries{1ULL + << (address_space_width_in_bits - page_size_in_bits)}; pointers.resize(num_page_table_entries); - attributes.resize(num_page_table_entries); - - // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the - // vector size is subsequently decreased (via resize), the vector might not automatically - // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for - // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use. 
- - pointers.shrink_to_fit(); - attributes.shrink_to_fit(); -} - -BackingPageTable::BackingPageTable(std::size_t page_size_in_bits) : PageTable{page_size_in_bits} {} - -BackingPageTable::~BackingPageTable() = default; - -void BackingPageTable::Resize(std::size_t address_space_width_in_bits) { - PageTable::Resize(address_space_width_in_bits); - const std::size_t num_page_table_entries = 1ULL - << (address_space_width_in_bits - page_size_in_bits); backing_addr.resize(num_page_table_entries); - backing_addr.shrink_to_fit(); + + if (has_attribute) { + attributes.resize(num_page_table_entries); + } } } // namespace Common diff --git a/src/common/page_table.h b/src/common/page_table.h index dbc272ab7..1e8bd3187 100644 --- a/src/common/page_table.h +++ b/src/common/page_table.h @@ -5,9 +5,12 @@ #pragma once #include <vector> + #include <boost/icl/interval_map.hpp> + #include "common/common_types.h" #include "common/memory_hook.h" +#include "common/virtual_buffer.h" namespace Common { @@ -47,7 +50,7 @@ struct SpecialRegion { * mimics the way a real CPU page table works. */ struct PageTable { - explicit PageTable(std::size_t page_size_in_bits); + PageTable(); ~PageTable(); /** @@ -56,40 +59,18 @@ struct PageTable { * * @param address_space_width_in_bits The address size width in bits. */ - void Resize(std::size_t address_space_width_in_bits); + void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits, + bool has_attribute); /** * Vector of memory pointers backing each page. An entry can only be non-null if the * corresponding entry in the `attributes` vector is of type `Memory`. */ - std::vector<u8*> pointers; - - /** - * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is - * of type `Special`. - */ - boost::icl::interval_map<u64, std::set<SpecialRegion>> special_regions; - - /** - * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then - * the corresponding entry in `pointers` MUST be set to null. - */ - std::vector<PageType> attributes; - - const std::size_t page_size_in_bits{}; -}; - -/** - * A more advanced Page Table with the ability to save a backing address when using it - * depends on another MMU. - */ -struct BackingPageTable : PageTable { - explicit BackingPageTable(std::size_t page_size_in_bits); - ~BackingPageTable(); + VirtualBuffer<u8*> pointers; - void Resize(std::size_t address_space_width_in_bits); + VirtualBuffer<u64> backing_addr; - std::vector<u64> backing_addr; + VirtualBuffer<PageType> attributes; }; } // namespace Common diff --git a/src/common/scope_exit.h b/src/common/scope_exit.h index 1176a72b1..68ef5f197 100644 --- a/src/common/scope_exit.h +++ b/src/common/scope_exit.h @@ -12,10 +12,17 @@ template <typename Func> struct ScopeExitHelper { explicit ScopeExitHelper(Func&& func) : func(std::move(func)) {} ~ScopeExitHelper() { - func(); + if (active) { + func(); + } + } + + void Cancel() { + active = false; } Func func; + bool active{true}; }; template <typename Func> diff --git a/src/common/virtual_buffer.cpp b/src/common/virtual_buffer.cpp new file mode 100644 index 000000000..b426f4747 --- /dev/null +++ b/src/common/virtual_buffer.cpp @@ -0,0 +1,52 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#ifdef _WIN32 +#include <windows.h> +#else +#include <stdio.h> +#include <sys/mman.h> +#include <sys/types.h> +#if defined __APPLE__ || defined __FreeBSD__ || defined __OpenBSD__ +#include <sys/sysctl.h> +#elif defined __HAIKU__ +#include <OS.h> +#else +#include <sys/sysinfo.h> +#endif +#endif + +#include "common/assert.h" +#include "common/virtual_buffer.h" + +namespace Common { + +void* AllocateMemoryPages(std::size_t size) { +#ifdef _WIN32 + void* base{VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE)}; +#else + void* base{mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + + ASSERT(base); + + return base; +} + +void FreeMemoryPages(void* base, std::size_t size) { + if (!base) { + return; + } +#ifdef _WIN32 + ASSERT(VirtualFree(base, 0, MEM_RELEASE)); +#else + ASSERT(munmap(base, size) == 0); +#endif +} + +} // namespace Common diff --git a/src/common/virtual_buffer.h b/src/common/virtual_buffer.h new file mode 100644 index 000000000..da064e59e --- /dev/null +++ b/src/common/virtual_buffer.h @@ -0,0 +1,58 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_funcs.h" + +namespace Common { + +void* AllocateMemoryPages(std::size_t size); +void FreeMemoryPages(void* base, std::size_t size); + +template <typename T> +class VirtualBuffer final : NonCopyable { +public: + constexpr VirtualBuffer() = default; + explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} { + base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size)); + } + + ~VirtualBuffer() { + FreeMemoryPages(base_ptr, alloc_size); + } + + void resize(std::size_t count) { + FreeMemoryPages(base_ptr, alloc_size); + + alloc_size = count * sizeof(T); + base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size)); + } + + constexpr const T& operator[](std::size_t index) const { + return base_ptr[index]; + } + + constexpr T& operator[](std::size_t index) { + return base_ptr[index]; + } + + constexpr T* data() { + return base_ptr; + } + + constexpr const T* data() const { + return base_ptr; + } + + constexpr std::size_t size() const { + return alloc_size / sizeof(T); + } + +private: + std::size_t alloc_size{}; + T* base_ptr{}; +}; + +} // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index c15d9f52f..8546d3602 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -35,6 +35,8 @@ add_library(core STATIC crypto/ctr_encryption_layer.h crypto/xts_encryption_layer.cpp crypto/xts_encryption_layer.h + device_memory.cpp + device_memory.h file_sys/bis_factory.cpp file_sys/bis_factory.h file_sys/card_image.cpp @@ -152,6 +154,23 @@ add_library(core STATIC hle/kernel/hle_ipc.h hle/kernel/kernel.cpp hle/kernel/kernel.h + hle/kernel/memory/address_space_info.cpp + hle/kernel/memory/address_space_info.h + hle/kernel/memory/memory_block.h + hle/kernel/memory/memory_block_manager.cpp + hle/kernel/memory/memory_block_manager.h + hle/kernel/memory/memory_layout.h + hle/kernel/memory/memory_manager.cpp + hle/kernel/memory/memory_manager.h + hle/kernel/memory/memory_types.h + hle/kernel/memory/page_linked_list.h + hle/kernel/memory/page_heap.cpp + hle/kernel/memory/page_heap.h + hle/kernel/memory/page_table.cpp + hle/kernel/memory/page_table.h + hle/kernel/memory/slab_heap.h + hle/kernel/memory/system_control.cpp + hle/kernel/memory/system_control.h 
hle/kernel/mutex.cpp hle/kernel/mutex.h hle/kernel/object.cpp @@ -178,6 +197,7 @@ add_library(core STATIC hle/kernel/shared_memory.h hle/kernel/svc.cpp hle/kernel/svc.h + hle/kernel/svc_types.h hle/kernel/svc_wrap.h hle/kernel/synchronization_object.cpp hle/kernel/synchronization_object.h @@ -189,8 +209,6 @@ add_library(core STATIC hle/kernel/time_manager.h hle/kernel/transfer_memory.cpp hle/kernel/transfer_memory.h - hle/kernel/vm_manager.cpp - hle/kernel/vm_manager.h hle/kernel/writable_event.cpp hle/kernel/writable_event.h hle/lock.cpp diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index fb9e616b9..d079a1bc8 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -60,7 +60,7 @@ static_assert(sizeof(ELFSymbol) == 0x18, "ELFSymbol has incorrect size."); using Symbols = std::vector<std::pair<ELFSymbol, std::string>>; -Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) { +Symbols GetSymbols(VAddr text_offset, Core::Memory::Memory& memory) { const auto mod_offset = text_offset + memory.Read32(text_offset + 4); if (mod_offset < text_offset || (mod_offset & 0b11) != 0 || diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index 57eae839e..cb2e640e2 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -26,28 +26,28 @@ public: virtual ~ARM_Interface() = default; struct ThreadContext32 { - std::array<u32, 16> cpu_registers; - u32 cpsr; - std::array<u8, 4> padding; - std::array<u64, 32> fprs; - u32 fpscr; - u32 fpexc; - u32 tpidr; + std::array<u32, 16> cpu_registers{}; + u32 cpsr{}; + std::array<u8, 4> padding{}; + std::array<u64, 32> fprs{}; + u32 fpscr{}; + u32 fpexc{}; + u32 tpidr{}; }; // Internally within the kernel, it expects the AArch32 version of the // thread context to be 344 bytes in size. static_assert(sizeof(ThreadContext32) == 0x158); struct ThreadContext64 { - std::array<u64, 31> cpu_registers; - u64 sp; - u64 pc; - u32 pstate; - std::array<u8, 4> padding; - std::array<u128, 32> vector_registers; - u32 fpcr; - u32 fpsr; - u64 tpidr; + std::array<u64, 31> cpu_registers{}; + u64 sp{}; + u64 pc{}; + u32 pstate{}; + std::array<u8, 4> padding{}; + std::array<u128, 32> vector_registers{}; + u32 fpcr{}; + u32 fpsr{}; + u64 tpidr{}; }; // Internally within the kernel, it expects the AArch64 version of the // thread context to be 800 bytes in size. 
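
The new audio_core/common.h above centralizes the "REVn" revision scheme that audio_renderer.cpp previously decoded inline. A minimal sketch of how the magic decodes and how BehaviorInfo's feature gates use it; this is a standalone illustration, assuming Common::MakeMagic packs its fourth argument into the top byte, as the `>> 24` in VersionFromRevision implies:

```cpp
#include "audio_core/common.h"   // VersionFromRevision, IsRevisionSupported
#include "common/common_funcs.h" // Common::MakeMagic

// "REV5" stores the ASCII digit '5' (0x35) in its top byte, so
// subtracting '0' (0x30) recovers the version number 5.
constexpr u32 rev5 = Common::MakeMagic('R', 'E', 'V', '5');
static_assert(AudioCore::VersionFromRevision(rev5) == 5);

// Feature gates then reduce to comparisons against the user revision; e.g.
// the elapsed frame count introduced in REV5 replaces the old inline
// "audren_revision >= 5" check removed from audio_renderer.cpp.
static_assert(AudioCore::IsRevisionSupported(5, rev5));
static_assert(!AudioCore::IsRevisionSupported(6, rev5));
```
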
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index 187a972ac..9bc86e3b9 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -67,7 +67,7 @@ public: } void CallSVC(u32 swi) override { - Kernel::CallSVC(parent.system, swi); + Kernel::Svc::Call(parent.system, swi); } void AddTicks(u64 ticks) override { diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h index 143e46e4d..8ba9cea8f 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.h +++ b/src/core/arm/dynarmic/arm_dynarmic_32.h @@ -15,7 +15,7 @@ #include "core/arm/arm_interface.h" #include "core/arm/exclusive_monitor.h" -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index a53a58ba0..65cbfe5e6 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -8,6 +8,7 @@ #include <dynarmic/A64/config.h> #include "common/logging/log.h" #include "common/microprofile.h" +#include "common/page_table.h" #include "core/arm/dynarmic/arm_dynarmic_64.h" #include "core/core.h" #include "core/core_manager.h" @@ -18,8 +19,8 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/svc.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" +#include "core/settings.h" namespace Core { @@ -103,7 +104,7 @@ public: } void CallSVC(u32 swi) override { - Kernel::CallSVC(parent.system, swi); + Kernel::Svc::Call(parent.system, swi); } void AddTicks(u64 ticks) override { @@ -144,6 +145,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& config.page_table_address_space_bits = address_space_bits; config.silently_mirror_page_table = false; config.absolute_offset_page_table = true; + config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128; + config.only_detect_misalignment_via_page_table_on_page_boundary = true; // Multi-process state config.processor_id = core_index; @@ -159,6 +162,12 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& // Unpredictable instructions config.define_unpredictable_behaviour = true; + // Optimizations + if (Settings::values.disable_cpu_opt) { + config.enable_optimizations = false; + config.enable_fast_dispatch = false; + } + return std::make_shared<Dynarmic::A64::Jit>(config); } diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h index e71240a96..647cecaf0 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.h +++ b/src/core/arm/dynarmic/arm_dynarmic_64.h @@ -15,7 +15,7 @@ #include "core/arm/exclusive_monitor.h" #include "core/arm/unicorn/arm_unicorn.h" -namespace Memory { +namespace Core::Memory { class Memory; } @@ -92,7 +92,7 @@ public: private: friend class ARM_Dynarmic_64; Dynarmic::A64::ExclusiveMonitor monitor; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Core diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h index 4ef418b90..ccd73b80f 100644 --- a/src/core/arm/exclusive_monitor.h +++ b/src/core/arm/exclusive_monitor.h @@ -8,7 +8,7 @@ #include "common/common_types.h" -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp index 8a9800a96..b96583123 100644 --- a/src/core/arm/unicorn/arm_unicorn.cpp +++ 
b/src/core/arm/unicorn/arm_unicorn.cpp @@ -11,6 +11,7 @@ #include "core/core_timing.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/svc.h" +#include "core/memory.h" namespace Core { @@ -171,7 +172,17 @@ MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64)); void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) { MICROPROFILE_SCOPE(ARM_Jit_Unicorn); + + // Temporarily map the code page for Unicorn + u64 map_addr{GetPC() & ~Memory::PAGE_MASK}; + std::vector<u8> page_buffer(Memory::PAGE_SIZE); + system.Memory().ReadBlock(map_addr, page_buffer.data(), page_buffer.size()); + + CHECKED(uc_mem_map_ptr(uc, map_addr, page_buffer.size(), + UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data())); CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); + CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size())); + system.CoreTiming().AddTicks(num_instructions); if (GDBStub::IsServerEnabled()) { if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) { @@ -266,7 +277,7 @@ void ARM_Unicorn::InterruptHook(uc_engine* uc, u32 int_no, void* user_data) { switch (ec) { case 0x15: // SVC - Kernel::CallSVC(arm_instance->system, iss); + Kernel::Svc::Call(arm_instance->system, iss); break; } } diff --git a/src/core/core.cpp b/src/core/core.cpp index 3bd90d79f..f9f8a3000 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -14,6 +14,7 @@ #include "core/core_manager.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/device_memory.h" #include "core/file_sys/bis_factory.h" #include "core/file_sys/card_image.h" #include "core/file_sys/mode.h" @@ -140,6 +141,8 @@ struct System::Impl { ResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { LOG_DEBUG(HW_Memory, "initialized OK"); + device_memory = std::make_unique<Core::DeviceMemory>(system); + core_timing.Initialize(); kernel.Initialize(); cpu_manager.Initialize(); @@ -276,6 +279,7 @@ struct System::Impl { telemetry_session.reset(); perf_stats.reset(); gpu_core.reset(); + device_memory.reset(); // Close all CPU/threading state cpu_manager.Shutdown(); @@ -346,7 +350,8 @@ struct System::Impl { std::unique_ptr<Loader::AppLoader> app_loader; std::unique_ptr<Tegra::GPU> gpu_core; std::unique_ptr<Hardware::InterruptManager> interrupt_manager; - Memory::Memory memory; + std::unique_ptr<Core::DeviceMemory> device_memory; + Core::Memory::Memory memory; CpuManager cpu_manager; bool is_powered_on = false; bool exit_lock = false; @@ -472,6 +477,14 @@ Kernel::Process* System::CurrentProcess() { return impl->kernel.CurrentProcess(); } +Core::DeviceMemory& System::DeviceMemory() { + return *impl->device_memory; +} + +const Core::DeviceMemory& System::DeviceMemory() const { + return *impl->device_memory; +} + const Kernel::Process* System::CurrentProcess() const { return impl->kernel.CurrentProcess(); } @@ -505,7 +518,7 @@ Memory::Memory& System::Memory() { return impl->memory; } -const Memory::Memory& System::Memory() const { +const Core::Memory::Memory& System::Memory() const { return impl->memory; } diff --git a/src/core/core.h b/src/core/core.h index 8d862a8e6..acc53d6a1 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -36,9 +36,10 @@ class AppLoader; enum class ResultStatus : u16; } // namespace Loader -namespace Memory { +namespace Core::Memory { struct CheatEntry; -} // namespace Memory +class Memory; +} // namespace Core::Memory namespace Service { @@ -86,14 +87,11 @@ namespace Core::Hardware { class InterruptManager; } -namespace 
Memory { -class Memory; -} - namespace Core { class ARM_Interface; class CoreManager; +class DeviceMemory; class ExclusiveMonitor; class FrameLimiter; class PerfStats; @@ -230,10 +228,10 @@ public: const ExclusiveMonitor& Monitor() const; /// Gets a mutable reference to the system memory instance. - Memory::Memory& Memory(); + Core::Memory::Memory& Memory(); /// Gets a constant reference to the system memory instance. - const Memory::Memory& Memory() const; + const Core::Memory::Memory& Memory() const; /// Gets a mutable reference to the GPU interface Tegra::GPU& GPU(); @@ -259,6 +257,12 @@ public: /// Gets the global scheduler const Kernel::GlobalScheduler& GlobalScheduler() const; + /// Gets the manager for the guest device memory + Core::DeviceMemory& DeviceMemory(); + + /// Gets the manager for the guest device memory + const Core::DeviceMemory& DeviceMemory() const; + /// Provides a pointer to the current process Kernel::Process* CurrentProcess(); diff --git a/src/core/core_manager.h b/src/core/core_manager.h index b14e723d7..d525de00a 100644 --- a/src/core/core_manager.h +++ b/src/core/core_manager.h @@ -22,7 +22,7 @@ namespace Core::Timing { class CoreTiming; } -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp index 87e6a1fd3..8997c7082 100644 --- a/src/core/crypto/key_manager.cpp +++ b/src/core/crypto/key_manager.cpp @@ -1202,7 +1202,8 @@ const boost::container::flat_map<std::string, KeyIndex<S128KeyType>> KeyManager: {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyAreaKey), static_cast<u64>(KeyAreaKeyType::System)}}, {"titlekek_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::Titlekek), 0}}, - {"keyblob_mac_key_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC)}}, + {"keyblob_mac_key_source", + {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC), 0}}, {"tsec_key", {S128KeyType::TSEC, 0, 0}}, {"secure_boot_key", {S128KeyType::SecureBoot, 0, 0}}, {"sd_seed", {S128KeyType::SDSeed, 0, 0}}, diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp new file mode 100644 index 000000000..51097ced3 --- /dev/null +++ b/src/core/device_memory.cpp @@ -0,0 +1,15 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/core.h" +#include "core/device_memory.h" +#include "core/memory.h" + +namespace Core { + +DeviceMemory::DeviceMemory(System& system) : buffer{DramMemoryMap::Size}, system{system} {} + +DeviceMemory::~DeviceMemory() = default; + +} // namespace Core diff --git a/src/core/device_memory.h b/src/core/device_memory.h new file mode 100644 index 000000000..9efa088d0 --- /dev/null +++ b/src/core/device_memory.h @@ -0,0 +1,51 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/virtual_buffer.h" + +namespace Core { + +class System; + +namespace DramMemoryMap { +enum : u64 { + Base = 0x80000000ULL, + Size = 0x100000000ULL, + End = Base + Size, + KernelReserveBase = Base + 0x60000, + SlabHeapBase = KernelReserveBase + 0x85000, + SlapHeapSize = 0xa21000, + SlabHeapEnd = SlabHeapBase + SlapHeapSize, +}; +}; // namespace DramMemoryMap + +class DeviceMemory : NonCopyable { +public: + explicit DeviceMemory(Core::System& system); + ~DeviceMemory(); + + template <typename T> + PAddr GetPhysicalAddr(const T* ptr) const { + return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) + + DramMemoryMap::Base; + } + + u8* GetPointer(PAddr addr) { + return buffer.data() + (addr - DramMemoryMap::Base); + } + + const u8* GetPointer(PAddr addr) const { + return buffer.data() + (addr - DramMemoryMap::Base); + } + +private: + Common::VirtualBuffer<u8> buffer; + Core::System& system; +}; + +} // namespace Core diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index 81ec06cd4..b93aa6935 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -249,7 +249,7 @@ bool PatchManager::HasNSOPatch(const std::array<u8, 32>& build_id_) const { } namespace { -std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder( +std::optional<std::vector<Core::Memory::CheatEntry>> ReadCheatFileFromFolder( const Core::System& system, u64 title_id, const std::array<u8, 0x20>& build_id_, const VirtualDir& base_path, bool upper) { const auto build_id_raw = Common::HexToString(build_id_, upper); @@ -269,14 +269,14 @@ std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder( return std::nullopt; } - Memory::TextCheatParser parser; + Core::Memory::TextCheatParser parser; return parser.Parse( system, std::string_view(reinterpret_cast<const char* const>(data.data()), data.size())); } } // Anonymous namespace -std::vector<Memory::CheatEntry> PatchManager::CreateCheatList( +std::vector<Core::Memory::CheatEntry> PatchManager::CreateCheatList( const Core::System& system, const std::array<u8, 32>& build_id_) const { const auto load_dir = system.GetFileSystemController().GetModificationLoadRoot(title_id); if (load_dir == nullptr) { @@ -289,7 +289,7 @@ std::vector<Memory::CheatEntry> PatchManager::CreateCheatList( std::sort(patch_dirs.begin(), patch_dirs.end(), [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); }); - std::vector<Memory::CheatEntry> out; + std::vector<Core::Memory::CheatEntry> out; for (const auto& subdir : patch_dirs) { if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) { continue; diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h index e857e6e82..ec6db524d 100644 --- a/src/core/file_sys/patch_manager.h +++ b/src/core/file_sys/patch_manager.h @@ -51,8 +51,8 @@ public: bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const; // Creates a CheatList object with all - std::vector<Memory::CheatEntry> CreateCheatList(const Core::System& system, - const std::array<u8, 0x20>& build_id) const; + std::vector<Core::Memory::CheatEntry> CreateCheatList( + const Core::System& system, const std::array<u8, 0x20>& build_id) const; // Currently tracked RomFS patches: // - Game Updates diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp index 
1d6c30962..43169bf9f 100644 --- a/src/core/file_sys/program_metadata.cpp +++ b/src/core/file_sys/program_metadata.cpp @@ -51,6 +51,17 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) { return Loader::ResultStatus::Success; } +/*static*/ ProgramMetadata ProgramMetadata::GetDefault() { + ProgramMetadata result; + + result.LoadManual( + true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/, + 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/, + {}, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, {} /*capabilities*/); + + return result; +} + void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio, u32 main_thread_core, u32 main_thread_stack_size, u64 title_id, diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h index f8759a396..35069972b 100644 --- a/src/core/file_sys/program_metadata.h +++ b/src/core/file_sys/program_metadata.h @@ -44,9 +44,13 @@ public: ProgramMetadata(); ~ProgramMetadata(); + /// Gets a default ProgramMetadata configuration, should only be used for homebrew formats where + /// we do not have an NPDM file + static ProgramMetadata GetDefault(); + Loader::ResultStatus Load(VirtualFile file); - // Load from parameters instead of NPDM file, used for KIP + /// Load from parameters instead of NPDM file, used for KIP void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio, u32 main_thread_core, u32 main_thread_stack_size, u64 title_id, u64 filesystem_permissions, KernelCapabilityDescriptors capabilities); diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp index 6d15aeed9..70c0f8b80 100644 --- a/src/core/gdbstub/gdbstub.cpp +++ b/src/core/gdbstub/gdbstub.cpp @@ -37,9 +37,9 @@ #include "core/core.h" #include "core/core_manager.h" #include "core/gdbstub/gdbstub.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/loader.h" #include "core/memory.h" @@ -643,7 +643,7 @@ static void HandleQuery() { SendReply(target_xml); } else if (strncmp(query, "Offsets", strlen("Offsets")) == 0) { const VAddr base_address = - Core::System::GetInstance().CurrentProcess()->VMManager().GetCodeRegionBaseAddress(); + Core::System::GetInstance().CurrentProcess()->PageTable().GetCodeRegionStart(); std::string buffer = fmt::format("TextSeg={:0x}", base_address); SendReply(buffer.c_str()); } else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) { @@ -1389,10 +1389,9 @@ void SendTrap(Kernel::Thread* thread, int trap) { return; } - if (!halt_loop || current_thread == thread) { - current_thread = thread; - SendSignal(thread, trap); - } + current_thread = thread; + SendSignal(thread, trap); + halt_loop = true; send_trap = false; } diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index 6d66276bc..5ab204b9b 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -47,7 +47,8 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern return MakeResult(std::move(client_session)); } -ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { +ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, + Core::Memory::Memory& memory) { // Keep ServerSession alive until we're done 
working with it. if (!parent->Server()) { return ERR_SESSION_CLOSED_BY_REMOTE; diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index d15b09554..c5f760d7d 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -12,7 +12,7 @@ union ResultCode; -namespace Memory { +namespace Core::Memory { class Memory; } @@ -42,7 +42,7 @@ public: return HANDLE_TYPE; } - ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); + ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); bool ShouldWait(const Thread* thread) const override; diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index 8097b3863..29bfa3621 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; +constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index d65dae3ae..91d94025c 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -282,19 +282,19 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { return RESULT_SUCCESS; } -std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const { +std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const { std::vector<u8> buffer; - const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) && + const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && BufferDescriptorA()[buffer_index].Size()}; auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_a) { - ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorA().size() > buffer_index, "BufferDescriptorA invalid buffer_index {}", buffer_index); buffer.resize(BufferDescriptorA()[buffer_index].Size()); memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size()); } else { - ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorX().size() > buffer_index, "BufferDescriptorX invalid buffer_index {}", buffer_index); buffer.resize(BufferDescriptorX()[buffer_index].Size()); memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size()); @@ -304,13 +304,13 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const { } std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, - int buffer_index) const { + std::size_t buffer_index) const { if (size == 0) { LOG_WARNING(Core, "skip empty buffer write"); return 0; } - const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) && + const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && BufferDescriptorB()[buffer_index].Size()}; const std::size_t buffer_size{GetWriteBufferSize(buffer_index)}; if (size > buffer_size) { @@ -321,13 +321,13 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, 
std::size_t size, auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_b) { - ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorB().size() > buffer_index, "BufferDescriptorB invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() >= size, "BufferDescriptorB buffer_index {} is not large enough", buffer_index); memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); } else { - ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorC().size() > buffer_index, "BufferDescriptorC invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size, "BufferDescriptorC buffer_index {} is not large enough", buffer_index); @@ -337,17 +337,17 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, return size; } -std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { - const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) && +std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const { + const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && BufferDescriptorA()[buffer_index].Size()}; if (is_buffer_a) { - ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorA().size() > buffer_index, "BufferDescriptorA invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0, "BufferDescriptorA buffer_index {} is empty", buffer_index); return BufferDescriptorA()[buffer_index].Size(); } else { - ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorX().size() > buffer_index, "BufferDescriptorX invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0, "BufferDescriptorX buffer_index {} is empty", buffer_index); @@ -355,15 +355,15 @@ std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { } } -std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { - const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) && +std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) const { + const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && BufferDescriptorB()[buffer_index].Size()}; if (is_buffer_b) { - ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorB().size() > buffer_index, "BufferDescriptorB invalid buffer_index {}", buffer_index); return BufferDescriptorB()[buffer_index].Size(); } else { - ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index), + ASSERT_MSG(BufferDescriptorC().size() > buffer_index, "BufferDescriptorC invalid buffer_index {}", buffer_index); return BufferDescriptorC()[buffer_index].Size(); } diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index 050ad8fd7..af3330297 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -179,10 +179,11 @@ public: } /// Helper function to read a buffer using the appropriate buffer descriptor - std::vector<u8> ReadBuffer(int buffer_index = 0) const; + std::vector<u8> ReadBuffer(std::size_t buffer_index = 0) const; /// Helper function to write a buffer using the appropriate buffer descriptor - std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const; + std::size_t WriteBuffer(const void* 
buffer, std::size_t size, + std::size_t buffer_index = 0) const; /* Helper function to write a buffer using the appropriate buffer descriptor * @@ -194,7 +195,8 @@ public: */ template <typename ContiguousContainer, typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>> - std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const { + std::size_t WriteBuffer(const ContiguousContainer& container, + std::size_t buffer_index = 0) const { using ContiguousType = typename ContiguousContainer::value_type; static_assert(std::is_trivially_copyable_v<ContiguousType>, @@ -205,10 +207,10 @@ public: } /// Helper function to get the size of the input buffer - std::size_t GetReadBufferSize(int buffer_index = 0) const; + std::size_t GetReadBufferSize(std::size_t buffer_index = 0) const; /// Helper function to get the size of the output buffer - std::size_t GetWriteBufferSize(int buffer_index = 0) const; + std::size_t GetWriteBufferSize(std::size_t buffer_index = 0) const; template <typename T> std::shared_ptr<T> GetCopyObject(std::size_t index) { diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 014d647cf..7655382fa 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -18,15 +18,20 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/core_timing_util.h" +#include "core/device_memory.h" #include "core/hardware_properties.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_layout.h" +#include "core/hle/kernel/memory/memory_manager.h" +#include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/scheduler.h" +#include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -110,6 +115,7 @@ struct KernelCore::Impl { InitializePhysicalCores(); InitializeSystemResourceLimit(kernel); + InitializeMemoryLayout(); InitializeThreads(); InitializePreemption(); } @@ -154,12 +160,17 @@ struct KernelCore::Impl { system_resource_limit = ResourceLimit::Create(kernel); // If setting the default system values fails, then something seriously wrong has occurred. 
- ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000) + ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000) .IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess()); + + if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) || + !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) { + UNREACHABLE(); + } } void InitializeThreads() { @@ -237,6 +248,57 @@ struct KernelCore::Impl { return result; } + void InitializeMemoryLayout() { + // Initialize memory layout + constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()}; + constexpr std::size_t hid_size{0x40000}; + constexpr std::size_t font_size{0x1100000}; + constexpr std::size_t irs_size{0x8000}; + constexpr std::size_t time_size{0x1000}; + constexpr PAddr hid_addr{layout.System().StartAddress()}; + constexpr PAddr font_pa{layout.System().StartAddress() + hid_size}; + constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size}; + constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size}; + + // Initialize memory manager + memory_manager = std::make_unique<Memory::MemoryManager>(); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application, + layout.Application().StartAddress(), + layout.Application().EndAddress()); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet, + layout.Applet().StartAddress(), + layout.Applet().EndAddress()); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::System, + layout.System().StartAddress(), + layout.System().EndAddress()); + + hid_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory"); + font_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory"); + irs_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory"); + time_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory"); + + // Allocate slab heaps + user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>(); + + // Initialize slab heaps + constexpr u64 user_slab_heap_size{0x3de000}; + user_slab_heap_pages->Initialize( + system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), + user_slab_heap_size); + } + std::atomic<u32> next_object_id{0}; std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; @@ -271,6 +333,16 @@ struct KernelCore::Impl { std::bitset<Core::Hardware::NUM_CPU_CORES> 
registered_core_threads; std::mutex register_thread_mutex; + // Kernel memory management + std::unique_ptr<Memory::MemoryManager> memory_manager; + std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages; + + // Shared memory for services + std::shared_ptr<Kernel::SharedMemory> hid_shared_mem; + std::shared_ptr<Kernel::SharedMemory> font_shared_mem; + std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; + std::shared_ptr<Kernel::SharedMemory> time_shared_mem; + // System context Core::System& system; }; @@ -437,4 +509,52 @@ Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const { return impl->GetCurrentEmuThreadID(); } +Memory::MemoryManager& KernelCore::MemoryManager() { + return *impl->memory_manager; +} + +const Memory::MemoryManager& KernelCore::MemoryManager() const { + return *impl->memory_manager; +} + +Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() { + return *impl->user_slab_heap_pages; +} + +const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const { + return *impl->user_slab_heap_pages; +} + +Kernel::SharedMemory& KernelCore::GetHidSharedMem() { + return *impl->hid_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const { + return *impl->hid_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetFontSharedMem() { + return *impl->font_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const { + return *impl->font_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetIrsSharedMem() { + return *impl->irs_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const { + return *impl->irs_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetTimeSharedMem() { + return *impl->time_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const { + return *impl->time_shared_mem; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c4f78ab71..83de1f542 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -8,6 +8,7 @@ #include <string> #include <unordered_map> #include <vector> +#include "core/hle/kernel/memory/memory_types.h" #include "core/hle/kernel/object.h" namespace Core { @@ -23,6 +24,12 @@ struct EventType; namespace Kernel { +namespace Memory { +class MemoryManager; +template <typename T> +class SlabHeap; +} // namespace Memory + class AddressArbiter; class ClientPort; class GlobalScheduler; @@ -31,6 +38,7 @@ class PhysicalCore; class Process; class ResourceLimit; class Scheduler; +class SharedMemory; class Synchronization; class Thread; class TimeManager; @@ -147,6 +155,42 @@ public: /// Register the current thread as a non CPU core thread. void RegisterHostThread(); + /// Gets the virtual memory manager for the kernel. + Memory::MemoryManager& MemoryManager(); + + /// Gets the virtual memory manager for the kernel. + const Memory::MemoryManager& MemoryManager() const; + + /// Gets the slab heap allocated for user space pages. + Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages(); + + /// Gets the slab heap allocated for user space pages. + const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const; + + /// Gets the shared memory object for HID services. + Kernel::SharedMemory& GetHidSharedMem(); + + /// Gets the shared memory object for HID services. + const Kernel::SharedMemory& GetHidSharedMem() const; + + /// Gets the shared memory object for font services. 
+ Kernel::SharedMemory& GetFontSharedMem(); + + /// Gets the shared memory object for font services. + const Kernel::SharedMemory& GetFontSharedMem() const; + + /// Gets the shared memory object for IRS services. + Kernel::SharedMemory& GetIrsSharedMem(); + + /// Gets the shared memory object for IRS services. + const Kernel::SharedMemory& GetIrsSharedMem() const; + + /// Gets the shared memory object for Time services. + Kernel::SharedMemory& GetTimeSharedMem(); + + /// Gets the shared memory object for Time services. + const Kernel::SharedMemory& GetTimeSharedMem() const; + private: friend class Object; friend class Process; diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp new file mode 100644 index 000000000..27fae05e7 --- /dev/null +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -0,0 +1,118 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#include <array> + +#include "common/assert.h" +#include "core/hle/kernel/memory/address_space_info.h" + +namespace Kernel::Memory { + +namespace { + +enum : u64 { + Size_1_MB = 0x100000, + Size_2_MB = 2 * Size_1_MB, + Size_128_MB = 128 * Size_1_MB, + Size_1_GB = 0x40000000, + Size_2_GB = 2 * Size_1_GB, + Size_4_GB = 4 * Size_1_GB, + Size_6_GB = 6 * Size_1_GB, + Size_64_GB = 64 * Size_1_GB, + Size_512_GB = 512 * Size_1_GB, + Invalid = std::numeric_limits<u64>::max(), +}; + +// clang-format off +constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{ + { 32 /*bit_width*/, Size_2_MB /*addr*/, Size_1_GB - Size_2_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, }, + { 32 /*bit_width*/, Size_1_GB /*addr*/, Size_4_GB - Size_1_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, }, + { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 36 /*bit_width*/, Size_128_MB /*addr*/, Size_2_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, }, + { 36 /*bit_width*/, Size_2_GB /*addr*/, Size_64_GB - Size_2_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, }, + { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 39 /*bit_width*/, Size_128_MB /*addr*/, Size_512_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Large64Bit, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Is32Bit }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_2_GB /*size*/, AddressSpaceInfo::Type::Stack, }, +}}; +// clang-format on + +constexpr bool IsAllowedIndexForAddress(std::size_t index) { + return index < std::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid; +} + +constexpr std::size_t + AddressSpaceIndices32Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 0, 1, 0, 2, 0, 3, + }; + +constexpr std::size_t + AddressSpaceIndices36Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 4, 5, 4, 6, 4, 7, + }; + +constexpr std::size_t 
+ AddressSpaceIndices39Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 9, 8, 8, 10, 12, 11, + }; + +constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit && + type != AddressSpaceInfo::Type::Stack; +} + +constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit && + type != AddressSpaceInfo::Type::Stack; +} + +constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit; +} + +} // namespace + +u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, AddressSpaceInfo::Type type) { + const std::size_t index{static_cast<std::size_t>(type)}; + switch (width) { + case 32: + ASSERT(IsAllowed32BitType(type)); + ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetAddress(); + case 36: + ASSERT(IsAllowed36BitType(type)); + ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetAddress(); + case 39: + ASSERT(IsAllowed39BitType(type)); + ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetAddress(); + } + UNREACHABLE(); +} + +std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, AddressSpaceInfo::Type type) { + const std::size_t index{static_cast<std::size_t>(type)}; + switch (width) { + case 32: + ASSERT(IsAllowed32BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetSize(); + case 36: + ASSERT(IsAllowed36BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetSize(); + case 39: + ASSERT(IsAllowed39BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetSize(); + } + UNREACHABLE(); +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/address_space_info.h b/src/core/hle/kernel/memory/address_space_info.h new file mode 100644 index 000000000..cc9a6421e --- /dev/null +++ b/src/core/hle/kernel/memory/address_space_info.h @@ -0,0 +1,54 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. 
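// Usage sketch for the tables above (illustrative; the real call sites appear
// in PageTable::InitializeForProcess later in this diff). The process address
// space width selects an index table, and the region type indexes into it: for
// a 39-bit process, Type::Heap resolves through AddressSpaceIndices39Bit[3] == 10,
// the 6 GiB entry whose base address is Invalid (chosen at runtime):
//
//   const std::size_t heap_size{
//       AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Heap)};
//   // heap_size == 6 GiB; GetAddressSpaceStart(39, Type::Heap) would trip the
//   // IsAllowedIndexForAddress assert, since this entry has no fixed address.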
+ +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Memory { + +class AddressSpaceInfo final : NonCopyable { +public: + enum class Type : u32 { + Is32Bit = 0, + Small64Bit = 1, + Large64Bit = 2, + Heap = 3, + Stack = 4, + Alias = 5, + Count, + }; + +private: + std::size_t bit_width{}; + std::size_t addr{}; + std::size_t size{}; + Type type{}; + +public: + static u64 GetAddressSpaceStart(std::size_t width, Type type); + static std::size_t GetAddressSpaceSize(std::size_t width, Type type); + + constexpr AddressSpaceInfo(std::size_t bit_width, std::size_t addr, std::size_t size, Type type) + : bit_width{bit_width}, addr{addr}, size{size}, type{type} {} + + constexpr std::size_t GetWidth() const { + return bit_width; + } + constexpr std::size_t GetAddress() const { + return addr; + } + constexpr std::size_t GetSize() const { + return size; + } + constexpr Type GetType() const { + return type; + } +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h new file mode 100644 index 000000000..e11043b60 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block.h @@ -0,0 +1,318 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#pragma once + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_types.h" +#include "core/hle/kernel/svc_types.h" + +namespace Kernel::Memory { + +enum class MemoryState : u32 { + None = 0, + Mask = 0xFFFFFFFF, // TODO(bunnei): This should probable be 0xFF + All = ~None, + + FlagCanReprotect = (1 << 8), + FlagCanDebug = (1 << 9), + FlagCanUseIpc = (1 << 10), + FlagCanUseNonDeviceIpc = (1 << 11), + FlagCanUseNonSecureIpc = (1 << 12), + FlagMapped = (1 << 13), + FlagCode = (1 << 14), + FlagCanAlias = (1 << 15), + FlagCanCodeAlias = (1 << 16), + FlagCanTransfer = (1 << 17), + FlagCanQueryPhysical = (1 << 18), + FlagCanDeviceMap = (1 << 19), + FlagCanAlignedDeviceMap = (1 << 20), + FlagCanIpcUserBuffer = (1 << 21), + FlagReferenceCounted = (1 << 22), + FlagCanMapProcess = (1 << 23), + FlagCanChangeAttribute = (1 << 24), + FlagCanCodeMemory = (1 << 25), + + FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | + FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | + FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | + FlagReferenceCounted | FlagCanChangeAttribute, + + FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | + FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | + FlagCanAlignedDeviceMap | FlagReferenceCounted, + + FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap, + + Free = static_cast<u32>(Svc::MemoryState::Free), + Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, + Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, + Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, + CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | + FlagCanCodeMemory, + Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | 
FlagReferenceCounted, + Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, + + AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | + FlagCanCodeAlias, + AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData | + FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory, + + Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap | + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + ThreadLocal = + static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, + + Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | + FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | + FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), + + NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + NonDeviceIpc = + static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc, + + Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, + + GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | + FlagReferenceCounted | FlagCanDebug, + CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryState); + +static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000); +static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001); +static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002); +static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03); +static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04); +static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05); +static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006); +static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08); +static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09); +static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A); +static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B); +static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C); +static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D); +static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E); +static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F); +static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010); +static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811); +static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812); +static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013); +static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214); +static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015); + +enum class MemoryPermission : u8 { + None = 0, + Mask = 
static_cast<u8>(~None), + + Read = 1 << 0, + Write = 1 << 1, + Execute = 1 << 2, + + ReadAndWrite = Read | Write, + ReadAndExecute = Read | Execute, + + UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | + Svc::MemoryPermission::Execute), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); + +enum class MemoryAttribute : u8 { + None = 0x00, + Mask = 0x7F, + All = Mask, + DontCareMask = 0x80, + + Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), + IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), + DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared), + Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), + + IpcAndDeviceMapped = IpcLocked | DeviceShared, + LockedAndIpcLocked = Locked | IpcLocked, + DeviceSharedAndUncached = DeviceShared | Uncached +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); + +static_assert((static_cast<u8>(MemoryAttribute::Mask) & + static_cast<u8>(MemoryAttribute::DontCareMask)) == 0); + +struct MemoryInfo { + VAddr addr{}; + std::size_t size{}; + MemoryState state{}; + MemoryPermission perm{}; + MemoryAttribute attribute{}; + MemoryPermission original_perm{}; + u16 ipc_lock_count{}; + u16 device_use_count{}; + + constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { + return { + addr, + size, + static_cast<Svc::MemoryState>(state & MemoryState::Mask), + static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask), + static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask), + ipc_lock_count, + device_use_count, + }; + } + + constexpr VAddr GetAddress() const { + return addr; + } + constexpr std::size_t GetSize() const { + return size; + } + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; + } + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + constexpr VAddr GetLastAddress() const { + return GetEndAddress() - 1; + } +}; + +class MemoryBlock final { + friend class MemoryBlockManager; + +private: + VAddr addr{}; + std::size_t num_pages{}; + MemoryState state{MemoryState::None}; + u16 ipc_lock_count{}; + u16 device_use_count{}; + MemoryPermission perm{MemoryPermission::None}; + MemoryPermission original_perm{MemoryPermission::None}; + MemoryAttribute attribute{MemoryAttribute::None}; + +public: + static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) { + if (lhs.GetAddress() < rhs.GetAddress()) { + return -1; + } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { + return 0; + } else { + return 1; + } + } + +public: + constexpr MemoryBlock() = default; + constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state, + MemoryPermission perm, MemoryAttribute attribute) + : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {} + + constexpr VAddr GetAddress() const { + return addr; + } + + constexpr std::size_t GetNumPages() const { + return num_pages; + } + + constexpr std::size_t GetSize() const { + return GetNumPages() * PageSize; + } + + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + + constexpr VAddr GetLastAddress() const { + return GetEndAddress() - 1; + } + + constexpr MemoryInfo GetMemoryInfo() const { + return { + GetAddress(), GetSize(), state, perm, + attribute, original_perm, ipc_lock_count, device_use_count, + }; + } + +private: + constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const { + constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask | + 
MemoryAttribute::IpcLocked | + MemoryAttribute::DeviceShared}; + return state == s && perm == p && + (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + } + + constexpr bool HasSameProperties(const MemoryBlock& rhs) const { + return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && + attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && + device_use_count == rhs.device_use_count; + } + + constexpr bool Contains(VAddr start) const { + return GetAddress() <= start && start <= GetEndAddress(); + } + + constexpr void Add(std::size_t count) { + ASSERT(count > 0); + ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); + + num_pages += count; + } + + constexpr void Update(MemoryState new_state, MemoryPermission new_perm, + MemoryAttribute new_attribute) { + ASSERT(original_perm == MemoryPermission::None); + ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None); + + state = new_state; + perm = new_perm; + + // TODO(bunnei): Is this right? + attribute = static_cast<MemoryAttribute>( + new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/); + } + + constexpr MemoryBlock Split(VAddr split_addr) { + ASSERT(GetAddress() < split_addr); + ASSERT(Contains(split_addr)); + ASSERT(Common::IsAligned(split_addr, PageSize)); + + MemoryBlock block; + block.addr = addr; + block.num_pages = (split_addr - GetAddress()) / PageSize; + block.state = state; + block.ipc_lock_count = ipc_lock_count; + block.device_use_count = device_use_count; + block.perm = perm; + block.original_perm = original_perm; + block.attribute = attribute; + + addr = split_addr; + num_pages -= block.num_pages; + + return block; + } +}; +static_assert(std::is_trivially_destructible<MemoryBlock>::value); + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block_manager.cpp b/src/core/hle/kernel/memory/memory_block_manager.cpp new file mode 100644 index 000000000..1ebc126c0 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block_manager.cpp @@ -0,0 +1,190 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
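// Sketch of the MemoryBlock::Split() semantics defined above (illustrative
// addresses, PageSize == 0x1000): given a block covering [0x10000, 0x20000),
// Split(0x14000) returns a new 4-page block for [0x10000, 0x14000) and shrinks
// the original to [0x14000, 0x20000). State, permissions and attributes are
// copied to the split-off head, so the manager below can insert it in front of
// the original without changing what the overall range describes.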
+ +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/memory_types.h" + +namespace Kernel::Memory { + +MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr) + : start_addr{start_addr}, end_addr{end_addr} { + const u64 num_pages{(end_addr - start_addr) / PageSize}; + memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None, + MemoryAttribute::None); +} + +MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) { + auto node{memory_block_tree.begin()}; + while (node != end()) { + const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; + if (node->GetAddress() <= addr && end_addr - 1 >= addr) { + return node; + } + node = std::next(node); + } + return end(); +} + +VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, + std::size_t num_pages, std::size_t align, std::size_t offset, + std::size_t guard_pages) { + if (num_pages == 0) { + return {}; + } + + const VAddr region_end{region_start + region_num_pages * PageSize}; + const VAddr region_last{region_end - 1}; + for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { + const auto info{it->GetMemoryInfo()}; + if (region_last < info.GetAddress()) { + break; + } + + if (info.state != MemoryState::Free) { + continue; + } + + VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; + area += guard_pages * PageSize; + + const VAddr offset_area{Common::AlignDown(area, align) + offset}; + area = (area <= offset_area) ? offset_area : offset_area + align; + + const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; + const VAddr area_last{area_end - 1}; + + if (info.GetAddress() <= area && area < area_last && area_last <= region_last && + area_last <= info.GetLastAddress()) { + return area; + } + } + + return {}; +} + +void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state, + MemoryPermission prev_perm, MemoryAttribute prev_attribute, + MemoryState state, MemoryPermission perm, + MemoryAttribute attribute) { + const std::size_t prev_count{memory_block_tree.size()}; + const VAddr end_addr{addr + num_pages * PageSize}; + iterator node{memory_block_tree.begin()}; + + prev_attribute |= MemoryAttribute::IpcAndDeviceMapped; + + while (node != memory_block_tree.end()) { + MemoryBlock* block{&(*node)}; + iterator next_node{std::next(node)}; + const VAddr cur_addr{block->GetAddress()}; + const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + + if (addr < cur_end_addr && cur_addr < end_addr) { + if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { + node = next_node; + continue; + } + + iterator new_node{node}; + if (addr > cur_addr) { + memory_block_tree.insert(node, block->Split(addr)); + } + + if (end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(end_addr)); + } + + new_node->Update(state, perm, attribute); + + MergeAdjacent(new_node, next_node); + } + + if (cur_end_addr - 1 >= end_addr - 1) { + break; + } + + node = next_node; + } +} + +void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state, + MemoryPermission perm, MemoryAttribute attribute) { + const std::size_t prev_count{memory_block_tree.size()}; + const VAddr end_addr{addr + num_pages * PageSize}; + iterator node{memory_block_tree.begin()}; + + while (node != memory_block_tree.end()) { + MemoryBlock* block{&(*node)}; + iterator 
next_node{std::next(node)}; + const VAddr cur_addr{block->GetAddress()}; + const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + + if (addr < cur_end_addr && cur_addr < end_addr) { + iterator new_node{node}; + + if (addr > cur_addr) { + memory_block_tree.insert(node, block->Split(addr)); + } + + if (end_addr < cur_end_addr) { + new_node = memory_block_tree.insert(node, block->Split(end_addr)); + } + + new_node->Update(state, perm, attribute); + + MergeAdjacent(new_node, next_node); + } + + if (cur_end_addr - 1 >= end_addr - 1) { + break; + } + + node = next_node; + } +} + +void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { + const_iterator it{FindIterator(start)}; + MemoryInfo info{}; + do { + info = it->GetMemoryInfo(); + func(info); + it = std::next(it); + } while (info.addr + info.size - 1 < end - 1 && it != cend()); +} + +void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { + MemoryBlock* block{&(*it)}; + + auto EraseIt = [&](const iterator it_to_erase) { + if (next_it == it_to_erase) { + next_it = std::next(next_it); + } + memory_block_tree.erase(it_to_erase); + }; + + if (it != memory_block_tree.begin()) { + MemoryBlock* prev{&(*std::prev(it))}; + + if (block->HasSameProperties(*prev)) { + const iterator prev_it{std::prev(it)}; + + prev->Add(block->GetNumPages()); + EraseIt(it); + + it = prev_it; + block = prev; + } + } + + if (it != cend()) { + const MemoryBlock* const next{&(*std::next(it))}; + + if (block->HasSameProperties(*next)) { + block->Add(next->GetNumPages()); + EraseIt(std::next(it)); + } + } +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h new file mode 100644 index 000000000..0f2270f0f --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block_manager.h @@ -0,0 +1,64 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
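// Sketch of the Update() flow implemented above (hypothetical starting state,
// PageSize == 0x1000): with a single node [Free 0x0..0x80000),
// Update(0x20000, 16 /*pages*/, MemoryState::Normal, MemoryPermission::ReadAndWrite)
//   1. Split(0x20000) inserts [Free 0x0..0x20000) ahead of the node
//   2. Split(0x30000) inserts [Free 0x20000..0x30000), leaving [Free 0x30000..0x80000)
//   3. the middle node becomes [Normal/RW 0x20000..0x30000)
//   4. MergeAdjacent() re-coalesces any neighbours left with identical properties.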
+ +#pragma once + +#include <functional> +#include <list> +#include <memory> + +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_block.h" + +namespace Kernel::Memory { + +class MemoryBlockManager final { +public: + using MemoryBlockTree = std::list<MemoryBlock>; + using iterator = MemoryBlockTree::iterator; + using const_iterator = MemoryBlockTree::const_iterator; + +public: + MemoryBlockManager(VAddr start_addr, VAddr end_addr); + + iterator end() { + return memory_block_tree.end(); + } + const_iterator end() const { + return memory_block_tree.end(); + } + const_iterator cend() const { + return memory_block_tree.cend(); + } + + iterator FindIterator(VAddr addr); + + VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, + std::size_t align, std::size_t offset, std::size_t guard_pages); + + void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state, + MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state, + MemoryPermission perm, MemoryAttribute attribute); + + void Update(VAddr addr, std::size_t num_pages, MemoryState state, + MemoryPermission perm = MemoryPermission::None, + MemoryAttribute attribute = MemoryAttribute::None); + + using IterateFunc = std::function<void(const MemoryInfo&)>; + void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); + + MemoryBlock& FindBlock(VAddr addr) { + return *FindIterator(addr); + } + +private: + void MergeAdjacent(iterator it, iterator& next_it); + + const VAddr start_addr; + const VAddr end_addr; + + MemoryBlockTree memory_block_tree; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h new file mode 100644 index 000000000..830c6f0d7 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_layout.h @@ -0,0 +1,73 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
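// Worked example for the FindFreeArea() declared above (implemented earlier in
// the .cpp; illustrative numbers): guard pages are first added in front of the
// candidate, then it is rounded up to the requested alignment. With
// area == 0x12345000, align == 0x200000 and offset == 0, AlignDown() yields
// 0x12200000, which is below area, so the candidate becomes
// 0x12200000 + 0x200000 == 0x12400000 -- the first aligned address at or above
// area. The candidate is accepted only if it and its trailing guard pages still
// fit inside both the free block and the requested region.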
+ +#pragma once + +#include "common/common_types.h" + +namespace Kernel::Memory { + +class MemoryRegion final { + friend class MemoryLayout; + +public: + constexpr PAddr StartAddress() const { + return start_address; + } + + constexpr PAddr EndAddress() const { + return end_address; + } + +private: + constexpr MemoryRegion() = default; + constexpr MemoryRegion(PAddr start_address, PAddr end_address) + : start_address{start_address}, end_address{end_address} {} + + const PAddr start_address{}; + const PAddr end_address{}; +}; + +class MemoryLayout final { +public: + constexpr const MemoryRegion& Application() const { + return application; + } + + constexpr const MemoryRegion& Applet() const { + return applet; + } + + constexpr const MemoryRegion& System() const { + return system; + } + + static constexpr MemoryLayout GetDefaultLayout() { + constexpr std::size_t application_size{0xcd500000}; + constexpr std::size_t applet_size{0x1fb00000}; + constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size}; + constexpr PAddr application_end_address{Core::DramMemoryMap::End}; + constexpr PAddr applet_start_address{application_start_address - applet_size}; + constexpr PAddr applet_end_address{applet_start_address + applet_size}; + constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd}; + constexpr PAddr system_end_address{applet_start_address}; + return {application_start_address, application_end_address, applet_start_address, + applet_end_address, system_start_address, system_end_address}; + } + +private: + constexpr MemoryLayout(PAddr application_start_address, std::size_t application_size, + PAddr applet_start_address, std::size_t applet_size, + PAddr system_start_address, std::size_t system_size) + : application{application_start_address, application_size}, + applet{applet_start_address, applet_size}, system{system_start_address, system_size} {} + + const MemoryRegion application; + const MemoryRegion applet; + const MemoryRegion system; + + const PAddr start_address{}; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp new file mode 100644 index 000000000..3cd4f9e85 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_manager.cpp @@ -0,0 +1,176 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
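// Note on GetDefaultLayout() above: despite the constructor's *_size parameter
// names, every second argument it receives is an *end address* (MemoryRegion
// stores start/end pairs), so the resulting carve-up of DRAM is:
//   application: [DramMemoryMap::End - 0xcd500000, DramMemoryMap::End)
//   applet:      [application start - 0x1fb00000,  application start)
//   system:      [DramMemoryMap::SlabHeapEnd,      applet start)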
+ +#include <algorithm> + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/scope_exit.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/memory/memory_manager.h" +#include "core/hle/kernel/memory/page_linked_list.h" + +namespace Kernel::Memory { + +std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) { + const auto size{end_address - start_address}; + + // Calculate metadata sizes + const auto ref_count_size{(size / PageSize) * sizeof(u16)}; + const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; + const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; + const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)}; + const auto total_metadata_size{manager_size + page_heap_size}; + ASSERT(manager_size <= total_metadata_size); + ASSERT(Common::IsAligned(total_metadata_size, PageSize)); + + // Setup region + pool = new_pool; + + // Initialize the manager's KPageHeap + heap.Initialize(start_address, size, page_heap_size); + + // Free the memory to the heap + heap.Free(start_address, size / PageSize); + + // Update the heap's used size + heap.UpdateUsedSize(); + + return total_metadata_size; +} + +void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) { + ASSERT(pool < Pool::Count); + managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address); +} + +VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool, + Direction dir) { + // Early return if we're allocating no pages + if (num_pages == 0) { + return {}; + } + + // Lock the pool that we're allocating from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // Choose a heap based on our page size request + const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; + + // Loop, trying to iterate from each block + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)}; + + // If we failed to allocate, quit now + if (!allocated_block) { + return {}; + } + + // If we allocated more than we need, free some + const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)}; + if (allocated_pages > num_pages) { + chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); + } + + return allocated_block; +} + +ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir) { + ASSERT(page_list.GetNumPages() == 0); + + // Early return if we're allocating no pages + if (num_pages == 0) { + return RESULT_SUCCESS; + } + + // Lock the pool that we're allocating from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // Choose a heap based on our page size request + const s32 heap_index{PageHeap::GetBlockIndex(num_pages)}; + if (heap_index < 0) { + return ERR_OUT_OF_MEMORY; + } + + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + + // Ensure that we don't leave anything un-freed + auto group_guard = detail::ScopeExit([&] { + for (const auto& it : page_list.Nodes()) { + const auto num_pages{std::min( + it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; + 
chosen_manager.Free(it.GetAddress(), num_pages); + } + }); + + // Keep allocating until we've allocated all our pages + for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { + const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)}; + + while (num_pages >= pages_per_alloc) { + // Allocate a block + VAddr allocated_block{chosen_manager.AllocateBlock(index)}; + if (!allocated_block) { + break; + } + + // Safely add it to our group + { + auto block_guard = detail::ScopeExit( + [&] { chosen_manager.Free(allocated_block, pages_per_alloc); }); + + if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)}; + result.IsError()) { + return result; + } + + block_guard.Cancel(); + } + + num_pages -= pages_per_alloc; + } + } + + // Only succeed if we allocated as many pages as we wanted + ASSERT(num_pages >= 0); + if (num_pages) { + return ERR_OUT_OF_MEMORY; + } + + // We succeeded! + group_guard.Cancel(); + return RESULT_SUCCESS; +} + +ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir) { + // Early return if we're freeing no pages + if (!num_pages) { + return RESULT_SUCCESS; + } + + // Lock the pool that we're freeing from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + + // Free all of the pages + for (const auto& it : page_list.Nodes()) { + const auto num_pages{std::min( + it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; + chosen_manager.Free(it.GetAddress(), num_pages); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h new file mode 100644 index 000000000..b078d7a5e --- /dev/null +++ b/src/core/hle/kernel/memory/memory_manager.h @@ -0,0 +1,97 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
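// The Allocate() path above leans on a cancellable scope-exit guard. A minimal
// sketch of the pattern (the helper itself lives in common/scope_exit.h):
//
//   auto guard = detail::ScopeExit([&] { /* roll back partial allocation */ });
//   ...                     // work that may early-return on failure
//   guard.Cancel();         // success: keep the work, skip the rollback
//
// Every early return taken between construction and Cancel() frees whatever
// was already added to the page list, so a failed Allocate() leaks nothing.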
+ +#pragma once + +#include <array> +#include <mutex> + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/page_heap.h" +#include "core/hle/result.h" + +namespace Kernel::Memory { + +class PageLinkedList; + +class MemoryManager final : NonCopyable { +public: + enum class Pool : u32 { + Application = 0, + Applet = 1, + System = 2, + SystemNonSecure = 3, + + Count, + + Shift = 4, + Mask = (0xF << Shift), + }; + + enum class Direction : u32 { + FromFront = 0, + FromBack = 1, + + Shift = 0, + Mask = (0xF << Shift), + }; + + MemoryManager() = default; + + constexpr std::size_t GetSize(Pool pool) const { + return managers[static_cast<std::size_t>(pool)].GetSize(); + } + + void InitializeManager(Pool pool, u64 start_address, u64 end_address); + VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool, + Direction dir = Direction::FromFront); + ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir = Direction::FromFront); + ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir = Direction::FromFront); + + static constexpr std::size_t MaxManagerCount = 10; + +private: + class Impl final : NonCopyable { + private: + using RefCount = u16; + + private: + PageHeap heap; + Pool pool{}; + + public: + Impl() = default; + + std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); + + VAddr AllocateBlock(s32 index) { + return heap.AllocateBlock(index); + } + + void Free(VAddr addr, std::size_t num_pages) { + heap.Free(addr, num_pages); + } + + constexpr std::size_t GetSize() const { + return heap.GetSize(); + } + + constexpr VAddr GetAddress() const { + return heap.GetAddress(); + } + + constexpr VAddr GetEndAddress() const { + return heap.GetEndAddress(); + } + }; + +private: + std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks; + std::array<Impl, MaxManagerCount> managers; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_types.h b/src/core/hle/kernel/memory/memory_types.h new file mode 100644 index 000000000..a75bf77c0 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_types.h @@ -0,0 +1,18 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> + +#include "common/common_types.h" + +namespace Kernel::Memory { + +constexpr std::size_t PageBits{12}; +constexpr std::size_t PageSize{1 << PageBits}; + +using Page = std::array<u8, PageSize>; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp new file mode 100644 index 000000000..efcbb3cad --- /dev/null +++ b/src/core/hle/kernel/memory/page_heap.cpp @@ -0,0 +1,119 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. 
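// Usage sketch for the MemoryManager interface above (hypothetical call site;
// the kernel-owned instance is reachable via KernelCore::MemoryManager()):
//
//   Kernel::Memory::PageLinkedList page_list;
//   const auto result{kernel.MemoryManager().Allocate(
//       page_list, 16, Kernel::Memory::MemoryManager::Pool::Application)};
//   // On success, page_list holds one or more contiguous page runs totalling
//   // exactly 16 pages; on exhaustion the guard frees any partial allocation
//   // and ERR_OUT_OF_MEMORY is returned instead.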
+ +#include "core/core.h" +#include "core/hle/kernel/memory/page_heap.h" +#include "core/memory.h" + +namespace Kernel::Memory { + +void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) { + // Check our assumptions + ASSERT(Common::IsAligned((address), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + + // Set our members + heap_address = address; + heap_size = size; + + // Setup bitmaps + metadata.resize(metadata_size / sizeof(u64)); + u64* cur_bitmap_storage{metadata.data()}; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; + cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift, + next_block_shift, cur_bitmap_storage); + } +} + +VAddr PageHeap::AllocateBlock(s32 index) { + const std::size_t needed_size{blocks[index].GetSize()}; + + for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) { + if (const VAddr addr{blocks[i].PopBlock()}; addr) { + if (const std::size_t allocated_size{blocks[i].GetSize()}; + allocated_size > needed_size) { + Free(addr + needed_size, (allocated_size - needed_size) / PageSize); + } + return addr; + } + } + + return 0; +} + +void PageHeap::FreeBlock(VAddr block, s32 index) { + do { + block = blocks[index++].PushBlock(block); + } while (block != 0); +} + +void PageHeap::Free(VAddr addr, std::size_t num_pages) { + // Freeing no pages is a no-op + if (num_pages == 0) { + return; + } + + // Find the largest block size that we can free, and free as many as possible + s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1}; + const VAddr start{addr}; + const VAddr end{(num_pages * PageSize) + addr}; + VAddr before_start{start}; + VAddr before_end{start}; + VAddr after_start{end}; + VAddr after_end{end}; + while (big_index >= 0) { + const std::size_t block_size{blocks[big_index].GetSize()}; + const VAddr big_start{Common::AlignUp((start), block_size)}; + const VAddr big_end{Common::AlignDown((end), block_size)}; + if (big_start < big_end) { + // Free as many big blocks as we can + for (auto block{big_start}; block < big_end; block += block_size) { + FreeBlock(block, big_index); + } + before_end = big_start; + after_start = big_end; + break; + } + big_index--; + } + ASSERT(big_index >= 0); + + // Free space before the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (before_start + block_size <= before_end) { + before_end -= block_size; + FreeBlock(before_end, i); + } + } + + // Free space after the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (after_start + block_size <= after_end) { + FreeBlock(after_start, i); + after_start += block_size; + } + } +} + +std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) { + std::size_t overhead_size = 0; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? 
MemoryBlockPageShifts[i + 1] : 0}; + overhead_size += PageHeap::Block::CalculateMetadataOverheadSize( + region_size, cur_block_shift, next_block_shift); + } + return Common::AlignUp(overhead_size, PageSize); +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h new file mode 100644 index 000000000..380c3f5a1 --- /dev/null +++ b/src/core/hle/kernel/memory/page_heap.h @@ -0,0 +1,370 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#pragma once + +#include <array> +#include <vector> + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/bit_util.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_types.h" + +namespace Kernel::Memory { + +class PageHeap final : NonCopyable { +public: + static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) { + const auto target_pages{std::max(num_pages, align_pages)}; + for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) { + if (target_pages <= + (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + return static_cast<s32>(i); + } + } + return -1; + } + + static constexpr s32 GetBlockIndex(std::size_t num_pages) { + for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { + if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { + return i; + } + } + return -1; + } + + static constexpr std::size_t GetBlockSize(std::size_t index) { + return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index]; + } + + static constexpr std::size_t GetBlockNumPages(std::size_t index) { + return GetBlockSize(index) / PageSize; + } + +private: + static constexpr std::size_t NumMemoryBlockPageShifts{7}; + static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ + 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, + }; + + class Block final : NonCopyable { + private: + class Bitmap final : NonCopyable { + public: + static constexpr std::size_t MaxDepth{4}; + + private: + std::array<u64*, MaxDepth> bit_storages{}; + std::size_t num_bits{}; + std::size_t used_depths{}; + + public: + constexpr Bitmap() = default; + + constexpr std::size_t GetNumBits() const { + return num_bits; + } + constexpr s32 GetHighestDepthIndex() const { + return static_cast<s32>(used_depths) - 1; + } + + constexpr u64* Initialize(u64* storage, std::size_t size) { + //* Initially, everything is un-set + num_bits = 0; + + // Calculate the needed bitmap depth + used_depths = static_cast<std::size_t>(GetRequiredDepth(size)); + ASSERT(used_depths <= MaxDepth); + + // Set the bitmap pointers + for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) { + bit_storages[depth] = storage; + size = Common::AlignUp(size, 64) / 64; + storage += size; + } + + return storage; + } + + s64 FindFreeBlock() const { + uintptr_t offset{}; + s32 depth{}; + + do { + const u64 v{bit_storages[depth][offset]}; + if (v == 0) { + // Non-zero depth indicates that a previous level had a free block + ASSERT(depth == 0); + return -1; + } + offset = offset * 64 + Common::CountTrailingZeroes64(v); + ++depth; + } while (depth < static_cast<s32>(used_depths)); + + return static_cast<s64>(offset); + } + 
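// Worked example for FindFreeBlock() above (illustrative, two depths in use):
// each depth-0 word summarizes 64 words of depth 1. If bit_storages[0][0] has
// bit 2 set, the walk descends into depth-1 word 2; if that word's lowest set
// bit is bit 5, the result is offset == 2 * 64 + 5 == 133, the index of a free
// block. A zero word can only be observed at depth 0 -- a deeper zero would
// contradict the summary bit that routed the walk there, hence the ASSERT.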
+ constexpr void SetBit(std::size_t offset) { + SetBit(GetHighestDepthIndex(), offset); + num_bits++; + } + + constexpr void ClearBit(std::size_t offset) { + ClearBit(GetHighestDepthIndex(), offset); + num_bits--; + } + + constexpr bool ClearRange(std::size_t offset, std::size_t count) { + const s32 depth{GetHighestDepthIndex()}; + const auto bit_ind{offset / 64}; + u64* bits{bit_storages[depth]}; + if (count < 64) { + const auto shift{offset % 64}; + ASSERT(shift + count <= 64); + // Check that all the bits are set + const u64 mask{((1ULL << count) - 1) << shift}; + u64 v{bits[bit_ind]}; + if ((v & mask) != mask) { + return false; + } + + // Clear the bits + v &= ~mask; + bits[bit_ind] = v; + if (v == 0) { + ClearBit(depth - 1, bit_ind); + } + } else { + ASSERT(offset % 64 == 0); + ASSERT(count % 64 == 0); + // Check that all the bits are set + std::size_t remaining{count}; + std::size_t i = 0; + do { + if (bits[bit_ind + i++] != ~u64(0)) { + return false; + } + remaining -= 64; + } while (remaining > 0); + + // Clear the bits + remaining = count; + i = 0; + do { + bits[bit_ind + i] = 0; + ClearBit(depth - 1, bit_ind + i); + i++; + remaining -= 64; + } while (remaining > 0); + } + + num_bits -= count; + return true; + } + + private: + constexpr void SetBit(s32 depth, std::size_t offset) { + while (depth >= 0) { + const auto ind{offset / 64}; + const auto which{offset % 64}; + const u64 mask{1ULL << which}; + + u64* bit{std::addressof(bit_storages[depth][ind])}; + const u64 v{*bit}; + ASSERT((v & mask) == 0); + *bit = v | mask; + if (v) { + break; + } + offset = ind; + depth--; + } + } + + constexpr void ClearBit(s32 depth, std::size_t offset) { + while (depth >= 0) { + const auto ind{offset / 64}; + const auto which{offset % 64}; + const u64 mask{1ULL << which}; + + u64* bit{std::addressof(bit_storages[depth][ind])}; + u64 v{*bit}; + ASSERT((v & mask) != 0); + v &= ~mask; + *bit = v; + if (v) { + break; + } + offset = ind; + depth--; + } + } + + private: + static constexpr s32 GetRequiredDepth(std::size_t region_size) { + s32 depth = 0; + while (true) { + region_size /= 64; + depth++; + if (region_size == 0) { + return depth; + } + } + } + + public: + static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) { + std::size_t overhead_bits = 0; + for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) { + region_size = Common::AlignUp(region_size, 64) / 64; + overhead_bits += region_size; + } + return overhead_bits * sizeof(u64); + } + }; + + private: + Bitmap bitmap; + VAddr heap_address{}; + uintptr_t end_offset{}; + std::size_t block_shift{}; + std::size_t next_block_shift{}; + + public: + constexpr Block() = default; + + constexpr std::size_t GetShift() const { + return block_shift; + } + constexpr std::size_t GetNextShift() const { + return next_block_shift; + } + constexpr std::size_t GetSize() const { + return static_cast<std::size_t>(1) << GetShift(); + } + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; + } + constexpr std::size_t GetNumFreeBlocks() const { + return bitmap.GetNumBits(); + } + constexpr std::size_t GetNumFreePages() const { + return GetNumFreeBlocks() * GetNumPages(); + } + + constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs, + u64* bit_storage) { + // Set shifts + block_shift = bs; + next_block_shift = nbs; + + // Align up the address + VAddr end{addr + size}; + const auto align{(next_block_shift != 0) ? 
(1ULL << next_block_shift) + : (1ULL << block_shift)}; + addr = Common::AlignDown((addr), align); + end = Common::AlignUp((end), align); + + heap_address = addr; + end_offset = (end - addr) / (1ULL << block_shift); + return bitmap.Initialize(bit_storage, end_offset); + } + + constexpr VAddr PushBlock(VAddr address) { + // Set the bit for the free block + std::size_t offset{(address - heap_address) >> GetShift()}; + bitmap.SetBit(offset); + + // If we have a next shift, try to clear the blocks below and return the address + if (GetNextShift()) { + const auto diff{1ULL << (GetNextShift() - GetShift())}; + offset = Common::AlignDown(offset, diff); + if (bitmap.ClearRange(offset, diff)) { + return heap_address + (offset << GetShift()); + } + } + + // We couldn't coalesce, or we're already as big as possible + return 0; + } + + VAddr PopBlock() { + // Find a free block + const s64 soffset{bitmap.FindFreeBlock()}; + if (soffset < 0) { + return 0; + } + const auto offset{static_cast<std::size_t>(soffset)}; + + // Update our tracking and return it + bitmap.ClearBit(offset); + return heap_address + (offset << GetShift()); + } + + public: + static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size, + std::size_t cur_block_shift, + std::size_t next_block_shift) { + const auto cur_block_size{(1ULL << cur_block_shift)}; + const auto next_block_size{(1ULL << next_block_shift)}; + const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size}; + return Bitmap::CalculateMetadataOverheadSize( + (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); + } + }; + +public: + PageHeap() = default; + + constexpr VAddr GetAddress() const { + return heap_address; + } + constexpr std::size_t GetSize() const { + return heap_size; + } + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + constexpr std::size_t GetPageOffset(VAddr block) const { + return (block - GetAddress()) / PageSize; + } + + void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size); + VAddr AllocateBlock(s32 index); + void Free(VAddr addr, std::size_t num_pages); + + void UpdateUsedSize() { + used_size = heap_size - (GetNumFreePages() * PageSize); + } + + static std::size_t CalculateMetadataOverheadSize(std::size_t region_size); + +private: + constexpr std::size_t GetNumFreePages() const { + std::size_t num_free{}; + + for (const auto& block : blocks) { + num_free += block.GetNumFreePages(); + } + + return num_free; + } + + void FreeBlock(VAddr block, s32 index); + + VAddr heap_address{}; + std::size_t heap_size{}; + std::size_t used_size{}; + std::array<Block, NumMemoryBlockPageShifts> blocks{}; + std::vector<u64> metadata; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_linked_list.h b/src/core/hle/kernel/memory/page_linked_list.h new file mode 100644 index 000000000..0668d00c6 --- /dev/null +++ b/src/core/hle/kernel/memory/page_linked_list.h @@ -0,0 +1,93 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
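// Sketch of the free-path coalescing done by PushBlock()/FreeBlock() above
// (illustrative shifts from MemoryBlockPageShifts): with block_shift == 12
// (4 KiB) and next_block_shift == 16 (64 KiB), one 64 KiB run spans 16 slots
// at this level. Freeing a 4 KiB block sets its bit; PushBlock() then aligns
// the offset down to its 16-slot group and calls ClearRange(offset, 16). If
// all 16 buddies were free, the bits are cleared and the group's address is
// returned, so FreeBlock() pushes the merged 64 KiB block into the next size
// class; otherwise the block simply stays at this level.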
+ +#pragma once + +#include <list> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_types.h" +#include "core/hle/result.h" + +namespace Kernel::Memory { + +class PageLinkedList final { +public: + class Node final { + public: + constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {} + + constexpr u64 GetAddress() const { + return addr; + } + + constexpr std::size_t GetNumPages() const { + return num_pages; + } + + private: + u64 addr{}; + std::size_t num_pages{}; + }; + +public: + PageLinkedList() = default; + PageLinkedList(u64 address, u64 num_pages) { + ASSERT(AddBlock(address, num_pages).IsSuccess()); + } + + constexpr std::list<Node>& Nodes() { + return nodes; + } + + constexpr const std::list<Node>& Nodes() const { + return nodes; + } + + std::size_t GetNumPages() const { + std::size_t num_pages = 0; + for (const Node& node : nodes) { + num_pages += node.GetNumPages(); + } + return num_pages; + } + + bool IsEqual(PageLinkedList& other) const { + auto this_node = nodes.begin(); + auto other_node = other.nodes.begin(); + while (this_node != nodes.end() && other_node != other.nodes.end()) { + if (this_node->GetAddress() != other_node->GetAddress() || + this_node->GetNumPages() != other_node->GetNumPages()) { + return false; + } + this_node = std::next(this_node); + other_node = std::next(other_node); + } + + return this_node == nodes.end() && other_node == other.nodes.end(); + } + + ResultCode AddBlock(u64 address, u64 num_pages) { + if (!num_pages) { + return RESULT_SUCCESS; + } + if (!nodes.empty()) { + const auto node = nodes.back(); + if (node.GetAddress() + node.GetNumPages() * PageSize == address) { + address = node.GetAddress(); + num_pages += node.GetNumPages(); + nodes.pop_back(); + } + } + nodes.push_back({address, num_pages}); + return RESULT_SUCCESS; + } + +private: + std::list<Node> nodes; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp new file mode 100644 index 000000000..091e52ca4 --- /dev/null +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -0,0 +1,1130 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
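+
+// Overview: PageTable models a single process address space. In
+// InitializeForProcess(), the address-space width is derived from the
+// ProgramAddressSpaceType, the code/alias/heap/stack/kernel-map regions are
+// carved out of it, and, when ASLR is requested, each region's placement is
+// randomized in RegionAlignment (2 MiB) steps via
+// SystemControl::GenerateRandomRange. The chosen layout is then asserted to be
+// overlap-free and fully contained in the address space.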
+ +#include "common/alignment.h" +#include "common/assert.h" +#include "common/scope_exit.h" +#include "core/core.h" +#include "core/device_memory.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/address_space_info.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/page_linked_list.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/system_control.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/resource_limit.h" +#include "core/memory.h" + +namespace Kernel::Memory { + +namespace { + +constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { + switch (as_type) { + case FileSys::ProgramAddressSpaceType::Is32Bit: + case FileSys::ProgramAddressSpaceType::Is32BitNoMap: + return 32; + case FileSys::ProgramAddressSpaceType::Is36Bit: + return 36; + case FileSys::ProgramAddressSpaceType::Is39Bit: + return 39; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr u64 GetAddressInRange(const MemoryInfo& info, VAddr addr) { + if (info.GetAddress() < addr) { + return addr; + } + return info.GetAddress(); +} + +constexpr std::size_t GetSizeInRange(const MemoryInfo& info, VAddr start, VAddr end) { + std::size_t size{info.GetSize()}; + if (info.GetAddress() < start) { + size -= start - info.GetAddress(); + } + if (info.GetEndAddress() > end) { + size -= info.GetEndAddress() - end; + } + return size; +} + +} // namespace + +PageTable::PageTable(Core::System& system) : system{system} {} + +ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, + bool enable_aslr, VAddr code_addr, std::size_t code_size, + Memory::MemoryManager::Pool pool) { + + const auto GetSpaceStart = [this](AddressSpaceInfo::Type type) { + return AddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + }; + const auto GetSpaceSize = [this](AddressSpaceInfo::Type type) { + return AddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + }; + + // Set our width and heap/alias sizes + address_space_width = GetAddressSpaceWidthFromType(as_type); + const VAddr start = 0; + const VAddr end{1ULL << address_space_width}; + std::size_t alias_region_size{GetSpaceSize(AddressSpaceInfo::Type::Alias)}; + std::size_t heap_region_size{GetSpaceSize(AddressSpaceInfo::Type::Heap)}; + + ASSERT(start <= code_addr); + ASSERT(code_addr < code_addr + code_size); + ASSERT(code_addr + code_size - 1 <= end - 1); + + // Adjust heap/alias size if we don't have an alias region + if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) { + heap_region_size += alias_region_size; + alias_region_size = 0; + } + + // Set code regions and determine remaining + constexpr std::size_t RegionAlignment{2 * 1024 * 1024}; + VAddr process_code_start{}; + VAddr process_code_end{}; + std::size_t stack_region_size{}; + std::size_t kernel_map_region_size{}; + + if (address_space_width == 39) { + alias_region_size = GetSpaceSize(AddressSpaceInfo::Type::Alias); + heap_region_size = GetSpaceSize(AddressSpaceInfo::Type::Heap); + stack_region_size = GetSpaceSize(AddressSpaceInfo::Type::Stack); + kernel_map_region_size = GetSpaceSize(AddressSpaceInfo::Type::Is32Bit); + code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Large64Bit); + code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Large64Bit); + alias_code_region_start = code_region_start; + 
alias_code_region_end = code_region_end;
+        process_code_start = Common::AlignDown(code_addr, RegionAlignment);
+        process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
+    } else {
+        stack_region_size = 0;
+        kernel_map_region_size = 0;
+        code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Is32Bit);
+        code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
+        stack_region_start = code_region_start;
+        alias_code_region_start = code_region_start;
+        alias_code_region_end = GetSpaceStart(AddressSpaceInfo::Type::Small64Bit) +
+                                GetSpaceSize(AddressSpaceInfo::Type::Small64Bit);
+        stack_region_end = code_region_end;
+        kernel_map_region_start = code_region_start;
+        kernel_map_region_end = code_region_end;
+        process_code_start = code_region_start;
+        process_code_end = code_region_end;
+    }
+
+    // Set other basic fields
+    is_aslr_enabled = enable_aslr;
+    address_space_start = start;
+    address_space_end = end;
+    is_kernel = false;
+
+    // Determine the region in which we can place the remaining, not-yet-determined regions
+    VAddr alloc_start{};
+    std::size_t alloc_size{};
+    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
+        alloc_start = code_region_start;
+        alloc_size = process_code_start - code_region_start;
+    } else {
+        alloc_start = process_code_end;
+        alloc_size = end - process_code_end;
+    }
+    const std::size_t needed_size{
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
+    if (alloc_size < needed_size) {
+        UNREACHABLE();
+        return ERR_OUT_OF_MEMORY;
+    }
+
+    const std::size_t remaining_size{alloc_size - needed_size};
+
+    // Determine random placements for each region
+    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+    if (enable_aslr) {
+        alias_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        heap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+        stack_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        kmap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+    }
+
+    // Set up the heap and alias regions
+    alias_region_start = alloc_start + alias_rnd;
+    alias_region_end = alias_region_start + alias_region_size;
+    heap_region_start = alloc_start + heap_rnd;
+    heap_region_end = heap_region_start + heap_region_size;
+
+    if (alias_rnd <= heap_rnd) {
+        heap_region_start += alias_region_size;
+        heap_region_end += alias_region_size;
+    } else {
+        alias_region_start += heap_region_size;
+        alias_region_end += heap_region_size;
+    }
+
+    // Set up the stack region
+    if (stack_region_size) {
+        stack_region_start = alloc_start + stack_rnd;
+        stack_region_end = stack_region_start + stack_region_size;
+
+        if (alias_rnd < stack_rnd) {
+            stack_region_start += alias_region_size;
+            stack_region_end += alias_region_size;
+        } else {
+            alias_region_start += stack_region_size;
+            alias_region_end += stack_region_size;
+        }
+
+        if (heap_rnd < stack_rnd) {
+            stack_region_start += heap_region_size;
+            stack_region_end += heap_region_size;
+        } else {
+            heap_region_start += stack_region_size;
+            heap_region_end += stack_region_size;
+        }
+    }
+
+    // Set up the kernel map region
+    if (kernel_map_region_size) {
+        kernel_map_region_start = alloc_start + kmap_rnd;
+        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+
+        if (alias_rnd < kmap_rnd) {
+            kernel_map_region_start += alias_region_size;
+            kernel_map_region_end += alias_region_size;
+        } else {
+            alias_region_start += kernel_map_region_size;
+            alias_region_end += kernel_map_region_size;
+        }
+
+        if (heap_rnd < kmap_rnd) {
+            kernel_map_region_start += heap_region_size;
+            kernel_map_region_end += heap_region_size;
+        } else {
+            heap_region_start += kernel_map_region_size;
+            heap_region_end += kernel_map_region_size;
+        }
+
+        if (stack_region_size) {
+            if (stack_rnd < kmap_rnd) {
+                kernel_map_region_start += stack_region_size;
+                kernel_map_region_end += stack_region_size;
+            } else {
+                stack_region_start += kernel_map_region_size;
+                stack_region_end += kernel_map_region_size;
+            }
+        }
+    }
+
+    // Set heap members
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    max_physical_memory_size = 0;
+
+    // Ensure that our regions are inside our address space
+    auto IsInAddressSpace = [&](VAddr addr) {
+        return address_space_start <= addr && addr <= address_space_end;
+    };
+    ASSERT(IsInAddressSpace(alias_region_start));
+    ASSERT(IsInAddressSpace(alias_region_end));
+    ASSERT(IsInAddressSpace(heap_region_start));
+    ASSERT(IsInAddressSpace(heap_region_end));
+    ASSERT(IsInAddressSpace(stack_region_start));
+    ASSERT(IsInAddressSpace(stack_region_end));
+    ASSERT(IsInAddressSpace(kernel_map_region_start));
+    ASSERT(IsInAddressSpace(kernel_map_region_end));
+
+    // Ensure that we selected regions that don't overlap
+    const VAddr alias_start{alias_region_start};
+    const VAddr alias_last{alias_region_end - 1};
+    const VAddr heap_start{heap_region_start};
+    const VAddr heap_last{heap_region_end - 1};
+    const VAddr stack_start{stack_region_start};
+    const VAddr stack_last{stack_region_end - 1};
+    const VAddr kmap_start{kernel_map_region_start};
+    const VAddr kmap_last{kernel_map_region_end - 1};
+    ASSERT(alias_last < heap_start || heap_last < alias_start);
+    ASSERT(alias_last < stack_start || stack_last < alias_start);
+    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+    ASSERT(heap_last < stack_start || stack_last < heap_start);
+    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+    current_heap_addr = heap_region_start;
+    heap_capacity = 0;
+    physical_memory_usage = 0;
+    memory_pool = pool;
+
+    page_table_impl.Resize(address_space_width, PageBits, true);
+
+    return InitializeMemoryLayout(start, end);
+}
+
+ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemoryState state,
+                                     MemoryPermission perm) {
+    std::lock_guard lock{page_table_lock};
+
+    const u64 size{num_pages * PageSize};
+
+    if (!CanContain(addr, size, state)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (IsRegionMapped(addr, size)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    PageLinkedList page_linked_list;
+    CASCADE_CODE(
+        system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+    CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
+
+    block_manager->Update(addr, num_pages, state, perm);
+
+    return RESULT_SUCCESS;
+}
+
+ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+    std::lock_guard lock{page_table_lock};
+
+    const std::size_t num_pages{size / PageSize};
+
+    MemoryState state{};
+    MemoryPermission perm{};
+    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, MemoryState::All,
+                                  MemoryState::Normal, MemoryPermission::Mask,
+                                  MemoryPermission::ReadAndWrite, MemoryAttribute::Mask,
+                                  MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
+
+    if (IsRegionMapped(dst_addr, size)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+
PageLinkedList page_linked_list; + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit( + [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); }); + + CASCADE_CODE( + Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::None)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, state, MemoryPermission::None, + MemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, MemoryState::AliasCode); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + if (!size) { + return RESULT_SUCCESS; + } + + const std::size_t num_pages{size / PageSize}; + + CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, src_addr, size, MemoryState::All, + MemoryState::Normal, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryState state{}; + CASCADE_CODE(CheckMemoryState( + &state, nullptr, nullptr, dst_addr, PageSize, MemoryState::FlagCanCodeAlias, + MemoryState::FlagCanCodeAlias, MemoryPermission::None, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + CASCADE_CODE(CheckMemoryState(dst_addr, size, MemoryState::All, state, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None)); + CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)); + + block_manager->Update(dst_addr, num_pages, MemoryState::Free); + block_manager->Update(src_addr, num_pages, MemoryState::Normal, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +void PageTable::MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end) { + auto node{page_linked_list.Nodes().begin()}; + PAddr map_addr{node->GetAddress()}; + std::size_t src_num_pages{node->GetNumPages()}; + + block_manager->IterateForRange(start, end, [&](const MemoryInfo& info) { + if (info.state != MemoryState::Free) { + return; + } + + std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize}; + VAddr dst_addr{GetAddressInRange(info, start)}; + + while (dst_num_pages) { + if (!src_num_pages) { + node = std::next(node); + map_addr = node->GetAddress(); + src_num_pages = node->GetNumPages(); + } + + const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)}; + Operate(dst_addr, num_pages, MemoryPermission::ReadAndWrite, OperationType::Map, + map_addr); + + dst_addr += num_pages * PageSize; + map_addr += num_pages * PageSize; + src_num_pages -= num_pages; + dst_num_pages -= num_pages; + } + }); +} + +ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + std::size_t mapped_size{}; + const VAddr end_addr{addr + size}; + + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state != MemoryState::Free) { + mapped_size += GetSizeInRange(info, addr, end_addr); + } + }); + + if (mapped_size == size) { + return RESULT_SUCCESS; + } + + auto process{system.Kernel().CurrentProcess()}; + const std::size_t remaining_size{size - mapped_size}; + const std::size_t remaining_pages{remaining_size / PageSize}; + + if (process->GetResourceLimit() && + 
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) { + return ERR_RESOURCE_LIMIT_EXCEEDED; + } + + PageLinkedList page_linked_list; + { + auto block_guard = detail::ScopeExit([&] { + system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool); + process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size); + }); + + CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, + memory_pool)); + + block_guard.Cancel(); + } + + MapPhysicalMemory(page_linked_list, addr, end_addr); + + physical_memory_usage += remaining_size; + + const std::size_t num_pages{size / PageSize}; + block_manager->Update(addr, num_pages, MemoryState::Free, MemoryPermission::None, + MemoryAttribute::None, MemoryState::Normal, + MemoryPermission::ReadAndWrite, MemoryAttribute::None); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + std::size_t mapped_size{}; + + // Verify that the region can be unmapped + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state == MemoryState::Normal) { + if (info.attribute != MemoryAttribute::None) { + result = ERR_INVALID_ADDRESS_STATE; + return; + } + mapped_size += GetSizeInRange(info, addr, end_addr); + } else if (info.state != MemoryState::Free) { + result = ERR_INVALID_ADDRESS_STATE; + } + }); + + if (result.IsError()) { + return result; + } + + if (!mapped_size) { + return RESULT_SUCCESS; + } + + CASCADE_CODE(UnmapMemory(addr, size)); + + auto process{system.Kernel().CurrentProcess()}; + process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size); + physical_memory_usage -= mapped_size; + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + PageLinkedList page_linked_list; + + // Unmap each region within the range + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state == MemoryState::Normal) { + const std::size_t block_size{GetSizeInRange(info, addr, end_addr)}; + const std::size_t block_num_pages{block_size / PageSize}; + const VAddr block_addr{GetAddressInRange(info, addr)}; + + AddRegionToPages(block_addr, block_size / PageSize, page_linked_list); + + if (result = Operate(block_addr, block_num_pages, MemoryPermission::None, + OperationType::Unmap); + result.IsError()) { + return; + } + } + }); + + if (result.IsError()) { + return result; + } + + const std::size_t num_pages{size / PageSize}; + system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool); + + block_manager->Update(addr, num_pages, MemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias, + MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::ReadAndWrite, + MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + if (IsRegionMapped(dst_addr, size)) { + return ERR_INVALID_ADDRESS_STATE; + } + + PageLinkedList page_linked_list; + const std::size_t num_pages{size / 
PageSize}; + + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit([&] { + Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite, + OperationType::ChangePermissions); + }); + + CASCADE_CODE( + Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::ReadAndWrite)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::None, + MemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, MemoryState::Stack, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias, + MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryPermission dst_perm{}; + CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, dst_addr, size, MemoryState::All, + MemoryState::Stack, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + PageLinkedList src_pages; + PageLinkedList dst_pages; + const std::size_t num_pages{size / PageSize}; + + AddRegionToPages(src_addr, num_pages, src_pages); + AddRegionToPages(dst_addr, num_pages, dst_pages); + + if (!dst_pages.IsEqual(src_pages)) { + return ERR_INVALID_MEMORY_RANGE; + } + + { + auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); + + CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)); + CASCADE_CODE(Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite, + OperationType::ChangePermissions)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::ReadAndWrite); + block_manager->Update(dst_addr, num_pages, MemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_list, + MemoryPermission perm) { + VAddr cur_addr{addr}; + + for (const auto& node : page_linked_list.Nodes()) { + if (const auto result{ + Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; + result.IsError()) { + const MemoryInfo info{block_manager->FindBlock(cur_addr).GetMemoryInfo()}; + const std::size_t num_pages{(addr - cur_addr) / PageSize}; + + ASSERT( + Operate(addr, num_pages, MemoryPermission::None, OperationType::Unmap).IsSuccess()); + + return result; + } + + cur_addr += node.GetNumPages() * PageSize; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state, + MemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; + + if (!CanContain(addr, size, state)) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (IsRegionMapped(addr, num_pages * PageSize)) { + return ERR_INVALID_ADDRESS_STATE; + } + + CASCADE_CODE(MapPages(addr, page_linked_list, perm)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, 
std::size_t size, MemoryPermission perm) { + + std::lock_guard lock{page_table_lock}; + + MemoryState prev_state{}; + MemoryPermission prev_perm{}; + + CASCADE_CODE(CheckMemoryState( + &prev_state, &prev_perm, nullptr, addr, size, MemoryState::FlagCode, MemoryState::FlagCode, + MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryState state{prev_state}; + + // Ensure state is mutable if permission allows write + if ((perm & MemoryPermission::Write) != MemoryPermission::None) { + if (prev_state == MemoryState::Code) { + state = MemoryState::CodeData; + } else if (prev_state == MemoryState::AliasCode) { + state = MemoryState::AliasCodeData; + } else { + UNREACHABLE(); + } + } + + // Return early if there is nothing to change + if (state == prev_state && perm == prev_perm) { + return RESULT_SUCCESS; + } + + const std::size_t num_pages{size / PageSize}; + const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None + ? OperationType::ChangePermissionsAndRefresh + : OperationType::ChangePermissions}; + + CASCADE_CODE(Operate(addr, num_pages, perm, operation)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +MemoryInfo PageTable::QueryInfoImpl(VAddr addr) { + std::lock_guard lock{page_table_lock}; + + return block_manager->FindBlock(addr).GetMemoryInfo(); +} + +MemoryInfo PageTable::QueryInfo(VAddr addr) { + if (!Contains(addr, 1)) { + return {address_space_end, 0 - address_space_end, MemoryState::Inaccessible, + MemoryPermission::None, MemoryAttribute::None, MemoryPermission::None}; + } + + return QueryInfoImpl(addr); +} + +ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + MemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState(&state, nullptr, &attribute, addr, size, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryPermission::Mask, MemoryPermission::ReadAndWrite, + MemoryAttribute::Mask, MemoryAttribute::None, + MemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, perm, attribute | MemoryAttribute::Locked); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + + CASCADE_CODE(CheckMemoryState(&state, nullptr, nullptr, addr, size, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryPermission::None, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::Locked, + MemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask, + MemoryAttribute value) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + MemoryPermission perm{}; + MemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState(&state, &perm, &attribute, addr, size, + MemoryState::FlagCanChangeAttribute, + MemoryState::FlagCanChangeAttribute, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::LockedAndIpcLocked, + MemoryAttribute::None, 
MemoryAttribute::DeviceSharedAndUncached)); + + attribute = attribute & ~mask; + attribute = attribute | (mask & value); + + block_manager->Update(addr, size / PageSize, state, perm, attribute); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) { + std::lock_guard lock{page_table_lock}; + heap_capacity = new_heap_capacity; + return RESULT_SUCCESS; +} + +ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) { + + if (size > heap_region_end - heap_region_start) { + return ERR_OUT_OF_MEMORY; + } + + const u64 previous_heap_size{GetHeapSize()}; + + UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented"); + + // Increase the heap size + { + std::lock_guard lock{page_table_lock}; + + const u64 delta{size - previous_heap_size}; + + auto process{system.Kernel().CurrentProcess()}; + if (process->GetResourceLimit() && delta != 0 && + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) { + return ERR_RESOURCE_LIMIT_EXCEEDED; + } + + PageLinkedList page_linked_list; + const std::size_t num_pages{delta / PageSize}; + + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)); + + if (IsRegionMapped(current_heap_addr, delta)) { + return ERR_INVALID_ADDRESS_STATE; + } + + CASCADE_CODE( + Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup)); + + block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal, + MemoryPermission::ReadAndWrite); + + current_heap_addr = heap_region_start + size; + } + + return MakeResult<VAddr>(heap_region_start); +} + +ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, MemoryState state, + MemoryPermission perm, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + if (!CanContain(region_start, region_num_pages * PageSize, state)) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (region_num_pages <= needed_num_pages) { + return ERR_OUT_OF_MEMORY; + } + + const VAddr addr{ + AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; + if (!addr) { + return ERR_OUT_OF_MEMORY; + } + + if (is_map_only) { + CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); + } else { + PageLinkedList page_group; + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool)); + CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); + } + + block_manager->Update(addr, needed_num_pages, state, perm); + + return MakeResult<VAddr>(addr); +} + +ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) { + block_manager = std::make_unique<MemoryBlockManager>(start, end); + + return RESULT_SUCCESS; +} + +bool PageTable::IsRegionMapped(VAddr address, u64 size) { + return CheckMemoryState(address, size, MemoryState::All, MemoryState::Free, + MemoryPermission::Mask, MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped) + .IsError(); +} + +bool PageTable::IsRegionContiguous(VAddr addr, u64 size) const { + auto start_ptr = system.Memory().GetPointer(addr); + for (u64 offset{}; offset < size; offset += PageSize) { + if (start_ptr != system.Memory().GetPointer(addr + offset)) { + return false; + } + start_ptr += PageSize; + } + return true; +} + +void PageTable::AddRegionToPages(VAddr start, std::size_t num_pages, + 
PageLinkedList& page_linked_list) { + VAddr addr{start}; + while (addr < start + (num_pages * PageSize)) { + const PAddr paddr{GetPhysicalAddr(addr)}; + if (!paddr) { + UNREACHABLE(); + } + page_linked_list.AddBlock(paddr, 1); + addr += PageSize; + } +} + +VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, + u64 needed_num_pages, std::size_t align) { + if (is_aslr_enabled) { + UNIMPLEMENTED(); + } + return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 1 : 4); +} + +ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group, + OperationType operation) { + std::lock_guard lock{page_table_lock}; + + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(num_pages > 0); + ASSERT(num_pages == page_group.GetNumPages()); + + for (const auto& node : page_group.Nodes()) { + const std::size_t size{node.GetNumPages() * PageSize}; + + switch (operation) { + case OperationType::MapGroup: + system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + break; + default: + UNREACHABLE(); + } + + addr += size; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm, + OperationType operation, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + ASSERT(num_pages > 0); + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(ContainsPages(addr, num_pages)); + + switch (operation) { + case OperationType::Unmap: + system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + break; + case OperationType::Map: { + ASSERT(map_addr); + ASSERT(Common::IsAligned(map_addr, PageSize)); + system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + break; + } + case OperationType::ChangePermissions: + case OperationType::ChangePermissionsAndRefresh: + break; + default: + UNREACHABLE(); + } + return RESULT_SUCCESS; +} + +constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const { + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return address_space_start; + case MemoryState::Normal: + return heap_region_start; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + return alias_region_start; + case MemoryState::Stack: + return stack_region_start; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::ThreadLocal: + return kernel_map_region_start; + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return alias_code_region_start; + case MemoryState::Code: + case MemoryState::CodeData: + return code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const { + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return address_space_end - address_space_start; + case MemoryState::Normal: + return heap_region_end - heap_region_start; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + return alias_region_end - alias_region_start; + case MemoryState::Stack: + return stack_region_end - stack_region_start; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::ThreadLocal: + return kernel_map_region_end - 
kernel_map_region_start; + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return alias_code_region_end - alias_code_region_start; + case MemoryState::Code: + case MemoryState::CodeData: + return code_region_end - code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState state) const { + const VAddr end{addr + size}; + const VAddr last{end - 1}; + const VAddr region_start{GetRegionAddress(state)}; + const std::size_t region_size{GetRegionSize(state)}; + const bool is_in_region{region_start <= addr && addr < end && + last <= region_start + region_size - 1}; + const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)}; + const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)}; + + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return is_in_region; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::Code: + case MemoryState::CodeData: + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Stack: + case MemoryState::ThreadLocal: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return is_in_region && !is_in_heap && !is_in_alias; + case MemoryState::Normal: + ASSERT(is_in_heap); + return is_in_region && !is_in_alias; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + ASSERT(is_in_alias); + return is_in_region && !is_in_heap; + default: + return false; + } +} + +constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr) const { + // Validate the states match expectation + if ((info.state & state_mask) != state) { + return ERR_INVALID_ADDRESS_STATE; + } + if ((info.perm & perm_mask) != perm) { + return ERR_INVALID_ADDRESS_STATE; + } + if ((info.attribute & attr_mask) != attr) { + return ERR_INVALID_ADDRESS_STATE; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm, + MemoryAttribute* out_attr, VAddr addr, std::size_t size, + MemoryState state_mask, MemoryState state, + MemoryPermission perm_mask, MemoryPermission perm, + MemoryAttribute attr_mask, MemoryAttribute attr, + MemoryAttribute ignore_attr) { + std::lock_guard lock{page_table_lock}; + + // Get information about the first block + const VAddr last_addr{addr + size - 1}; + MemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)}; + MemoryInfo info{it->GetMemoryInfo()}; + + // Validate all blocks in the range have correct state + const MemoryState first_state{info.state}; + const MemoryPermission first_perm{info.perm}; + const MemoryAttribute first_attr{info.attribute}; + + while (true) { + // Validate the current block + if (!(info.state == first_state)) { + return ERR_INVALID_ADDRESS_STATE; + } + if (!(info.perm == first_perm)) { + return ERR_INVALID_ADDRESS_STATE; + } + if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) == + (first_attr | 
static_cast<MemoryAttribute>(ignore_attr)))) { + return ERR_INVALID_ADDRESS_STATE; + } + + // Validate against the provided masks + CASCADE_CODE(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator + it++; + ASSERT(it != block_manager->cend()); + info = it->GetMemoryInfo(); + } + + // Write output state + if (out_state) { + *out_state = first_state; + } + if (out_perm) { + *out_perm = first_perm; + } + if (out_attr) { + *out_attr = first_attr & static_cast<MemoryAttribute>(~ignore_attr); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h new file mode 100644 index 000000000..80384ab0f --- /dev/null +++ b/src/core/hle/kernel/memory/page_table.h @@ -0,0 +1,276 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <list> +#include <memory> +#include <mutex> + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/page_table.h" +#include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/memory_manager.h" + +namespace Core { +class System; +} + +namespace Kernel::Memory { + +class MemoryBlockManager; + +class PageTable final : NonCopyable { +public: + explicit PageTable(Core::System& system); + + ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, + VAddr code_addr, std::size_t code_size, + Memory::MemoryManager::Pool pool); + ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state, + MemoryPermission perm); + ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapMemory(VAddr addr, std::size_t size); + ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state, + MemoryPermission perm); + ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm); + MemoryInfo QueryInfo(VAddr addr); + ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm); + ResultCode ResetTransferMemory(VAddr addr, std::size_t size); + ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask, + MemoryAttribute value); + ResultCode SetHeapCapacity(std::size_t new_heap_capacity); + ResultVal<VAddr> SetHeapSize(std::size_t size); + ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, MemoryState state, + MemoryPermission perm, PAddr map_addr = 0); + + Common::PageTable& PageTableImpl() { + return page_table_impl; + } + + const Common::PageTable& PageTableImpl() const { + return page_table_impl; + } + +private: + enum class OperationType : u32 { + Map, + MapGroup, + Unmap, + ChangePermissions, + ChangePermissionsAndRefresh, + }; + + static constexpr MemoryAttribute DefaultMemoryIgnoreAttr = + 
MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared; + + ResultCode InitializeMemoryLayout(VAddr start, VAddr end); + ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm); + void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end); + bool IsRegionMapped(VAddr address, u64 size); + bool IsRegionContiguous(VAddr addr, u64 size) const; + void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list); + MemoryInfo QueryInfoImpl(VAddr addr); + VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, + std::size_t align); + ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group, + OperationType operation); + ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm, + OperationType operation, PAddr map_addr = 0); + constexpr VAddr GetRegionAddress(MemoryState state) const; + constexpr std::size_t GetRegionSize(MemoryState state) const; + constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const; + + constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr) const; + ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm, + MemoryAttribute* out_attr, VAddr addr, std::size_t size, + MemoryState state_mask, MemoryState state, + MemoryPermission perm_mask, MemoryPermission perm, + MemoryAttribute attr_mask, MemoryAttribute attr, + MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr); + ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr, + MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) { + return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr, ignore_attr); + } + + std::recursive_mutex page_table_lock; + std::unique_ptr<MemoryBlockManager> block_manager; + +public: + constexpr VAddr GetAddressSpaceStart() const { + return address_space_start; + } + constexpr VAddr GetAddressSpaceEnd() const { + return address_space_end; + } + constexpr std::size_t GetAddressSpaceSize() const { + return address_space_end - address_space_start; + } + constexpr VAddr GetHeapRegionStart() const { + return heap_region_start; + } + constexpr VAddr GetHeapRegionEnd() const { + return heap_region_end; + } + constexpr std::size_t GetHeapRegionSize() const { + return heap_region_end - heap_region_start; + } + constexpr VAddr GetAliasRegionStart() const { + return alias_region_start; + } + constexpr VAddr GetAliasRegionEnd() const { + return alias_region_end; + } + constexpr std::size_t GetAliasRegionSize() const { + return alias_region_end - alias_region_start; + } + constexpr VAddr GetStackRegionStart() const { + return stack_region_start; + } + constexpr VAddr GetStackRegionEnd() const { + return stack_region_end; + } + constexpr std::size_t GetStackRegionSize() const { + return stack_region_end - stack_region_start; + } + constexpr VAddr GetKernelMapRegionStart() const { + return kernel_map_region_start; + } + constexpr VAddr GetKernelMapRegionEnd() const { + return kernel_map_region_end; + } + constexpr VAddr GetCodeRegionStart() const { + return code_region_start; + } + constexpr VAddr 
GetCodeRegionEnd() const { + return code_region_end; + } + constexpr VAddr GetAliasCodeRegionStart() const { + return alias_code_region_start; + } + constexpr VAddr GetAliasCodeRegionSize() const { + return alias_code_region_end - alias_code_region_start; + } + constexpr std::size_t GetAddressSpaceWidth() const { + return address_space_width; + } + constexpr std::size_t GetHeapSize() { + return current_heap_addr - heap_region_start; + } + constexpr std::size_t GetTotalHeapSize() { + return GetHeapSize() + physical_memory_usage; + } + constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { + return address_space_start <= address && address + size - 1 <= address_space_end - 1; + } + constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { + return alias_region_start > address || address + size - 1 > alias_region_end - 1; + } + constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { + return stack_region_start > address || address + size - 1 > stack_region_end - 1; + } + constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { + return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; + } + constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { + return address + size > heap_region_start && heap_region_end > address; + } + constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { + return address + size > alias_region_start && alias_region_end > address; + } + constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { + if (IsInvalidRegion(address, size)) { + return true; + } + if (IsInsideHeapRegion(address, size)) { + return true; + } + if (IsInsideAliasRegion(address, size)) { + return true; + } + return {}; + } + constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { + return !IsOutsideASLRRegion(address, size); + } + constexpr PAddr GetPhysicalAddr(VAddr addr) { + return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr; + } + +private: + constexpr bool Contains(VAddr addr) const { + return address_space_start <= addr && addr <= address_space_end - 1; + } + constexpr bool Contains(VAddr addr, std::size_t size) const { + return address_space_start <= addr && addr < addr + size && + addr + size - 1 <= address_space_end - 1; + } + constexpr bool IsKernel() const { + return is_kernel; + } + constexpr bool IsAslrEnabled() const { + return is_aslr_enabled; + } + + constexpr std::size_t GetNumGuardPages() const { + return IsKernel() ? 
1 : 4; + } + + constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { + return (address_space_start <= addr) && + (num_pages <= (address_space_end - address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= address_space_end - 1); + } + +private: + VAddr address_space_start{}; + VAddr address_space_end{}; + VAddr heap_region_start{}; + VAddr heap_region_end{}; + VAddr current_heap_end{}; + VAddr alias_region_start{}; + VAddr alias_region_end{}; + VAddr stack_region_start{}; + VAddr stack_region_end{}; + VAddr kernel_map_region_start{}; + VAddr kernel_map_region_end{}; + VAddr code_region_start{}; + VAddr code_region_end{}; + VAddr alias_code_region_start{}; + VAddr alias_code_region_end{}; + VAddr current_heap_addr{}; + + std::size_t heap_capacity{}; + std::size_t physical_memory_usage{}; + std::size_t max_heap_size{}; + std::size_t max_physical_memory_size{}; + std::size_t address_space_width{}; + + bool is_kernel{}; + bool is_aslr_enabled{}; + + MemoryManager::Pool memory_pool{MemoryManager::Pool::Application}; + + Common::PageTable page_table_impl; + + Core::System& system; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/slab_heap.h b/src/core/hle/kernel/memory/slab_heap.h new file mode 100644 index 000000000..be95fc3f7 --- /dev/null +++ b/src/core/hle/kernel/memory/slab_heap.h @@ -0,0 +1,164 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#pragma once + +#include <atomic> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Memory { + +namespace impl { + +class SlabHeapImpl final : NonCopyable { +public: + struct Node { + Node* next{}; + }; + + constexpr SlabHeapImpl() = default; + + void Initialize(std::size_t size) { + ASSERT(head == nullptr); + obj_size = size; + } + + constexpr std::size_t GetObjectSize() const { + return obj_size; + } + + Node* GetHead() const { + return head; + } + + void* Allocate() { + Node* ret = head.load(); + + do { + if (ret == nullptr) { + break; + } + } while (!head.compare_exchange_weak(ret, ret->next)); + + return ret; + } + + void Free(void* obj) { + Node* node = static_cast<Node*>(obj); + + Node* cur_head = head.load(); + do { + node->next = cur_head; + } while (!head.compare_exchange_weak(cur_head, node)); + } + +private: + std::atomic<Node*> head{}; + std::size_t obj_size{}; +}; + +} // namespace impl + +class SlabHeapBase : NonCopyable { +public: + constexpr SlabHeapBase() = default; + + constexpr bool Contains(uintptr_t addr) const { + return start <= addr && addr < end; + } + + constexpr std::size_t GetSlabHeapSize() const { + return (end - start) / GetObjectSize(); + } + + constexpr std::size_t GetObjectSize() const { + return impl.GetObjectSize(); + } + + constexpr uintptr_t GetSlabHeapAddress() const { + return start; + } + + std::size_t GetObjectIndexImpl(const void* obj) const { + return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize(); + } + + std::size_t GetPeakIndex() const { + return GetObjectIndexImpl(reinterpret_cast<const void*>(peak)); + } + + void* AllocateImpl() { + return impl.Allocate(); + } + + void FreeImpl(void* obj) { + // Don't allow freeing an object that wasn't allocated from this heap + ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); 
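+        // Hand the object back to the allocator; SlabHeapImpl::Free pushes it
+        // onto the atomic free-list head via a compare-exchange loop.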
+        impl.Free(obj);
+    }
+
+    void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) {
+        // Ensure we don't initialize a slab using null memory
+        ASSERT(memory != nullptr);
+
+        // Initialize the base allocator
+        impl.Initialize(obj_size);
+
+        // Set our tracking variables
+        const std::size_t num_obj = (memory_size / obj_size);
+        start = reinterpret_cast<uintptr_t>(memory);
+        end = start + num_obj * obj_size;
+        peak = start;
+
+        // Free the objects
+        u8* cur = reinterpret_cast<u8*>(end);
+
+        for (std::size_t i{}; i < num_obj; i++) {
+            cur -= obj_size;
+            impl.Free(cur);
+        }
+    }
+
+private:
+    using Impl = impl::SlabHeapImpl;
+
+    Impl impl;
+    uintptr_t peak{};
+    uintptr_t start{};
+    uintptr_t end{};
+};
+
+template <typename T>
+class SlabHeap final : public SlabHeapBase {
+public:
+    constexpr SlabHeap() : SlabHeapBase() {}
+
+    void Initialize(void* memory, std::size_t memory_size) {
+        InitializeImpl(sizeof(T), memory, memory_size);
+    }
+
+    T* Allocate() {
+        T* obj = static_cast<T*>(AllocateImpl());
+        if (obj != nullptr) {
+            new (obj) T();
+        }
+        return obj;
+    }
+
+    void Free(T* obj) {
+        FreeImpl(obj);
+    }
+
+    constexpr std::size_t GetObjectIndex(const T* obj) const {
+        return GetObjectIndexImpl(obj);
+    }
+};
+
+} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/system_control.cpp b/src/core/hle/kernel/memory/system_control.cpp
new file mode 100644
index 000000000..9cae3c6cb
--- /dev/null
+++ b/src/core/hle/kernel/memory/system_control.cpp
@@ -0,0 +1,40 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <limits>
+#include <random>
+
+#include "core/hle/kernel/memory/system_control.h"
+
+namespace Kernel::Memory::SystemControl {
+
+u64 GenerateRandomU64ForInit() {
+    static std::random_device device;
+    static std::mt19937 gen(device());
+    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+    return distribution(gen);
+}
+
+template <typename F>
+u64 GenerateUniformRange(u64 min, u64 max, F f) {
+    /* Handle the case where the difference is too large to represent. */
+    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
+        return f();
+    }
+
+    /* Iterate until we get a value in range. */
+    const u64 range_size = ((max + 1) - min);
+    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
+    while (true) {
+        if (const u64 rnd = f(); rnd < effective_max) {
+            return min + (rnd % range_size);
+        }
+    }
+}
+
+u64 GenerateRandomRange(u64 min, u64 max) {
+    return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
+}
+
+} // namespace Kernel::Memory::SystemControl
diff --git a/src/core/hle/kernel/memory/system_control.h b/src/core/hle/kernel/memory/system_control.h
new file mode 100644
index 000000000..3fa93111d
--- /dev/null
+++ b/src/core/hle/kernel/memory/system_control.h
@@ -0,0 +1,18 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
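+
+// Random-number helpers used for memory layout (e.g. ASLR region placement).
+// GenerateRandomRange(min, max) is uniform over the closed range [min, max]:
+// the implementation rejection-samples the underlying 64-bit generator so the
+// final modulo by the range size introduces no bias.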
+ +#pragma once + +#include "common/common_types.h" + +namespace Kernel::Memory::SystemControl { + +u64 GenerateRandomU64ForInit(); + +template <typename F> +u64 GenerateUniformRange(u64 min, u64 max, F f); + +u64 GenerateRandomRange(u64 min, u64 max); + +} // namespace Kernel::Memory::SystemControl diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h index b689e8e8b..7a0266780 100644 --- a/src/core/hle/kernel/physical_memory.h +++ b/src/core/hle/kernel/physical_memory.h @@ -4,6 +4,8 @@ #pragma once +#include <vector> + #include "common/alignment.h" namespace Kernel { diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index edc414d69..36724569f 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -10,15 +10,18 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/device_memory.h" #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "core/settings.h" @@ -31,10 +34,8 @@ namespace { * @param kernel The kernel instance to create the main thread under. * @param priority The priority to give the main thread */ -void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { - const auto& vm_manager = owner_process.VMManager(); - const VAddr entry_point = vm_manager.GetCodeRegionBaseAddress(); - const VAddr stack_top = vm_manager.GetTLSIORegionEndAddress(); +void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) { + const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, owner_process.GetIdealCore(), stack_top, owner_process); @@ -42,6 +43,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { // Register 1 must be a handle to the main thread const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); + thread->GetContext32().cpu_registers[0] = 0; + thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; thread->GetContext64().cpu_registers[1] = thread_handle; @@ -57,7 +60,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { // (whichever page happens to have an available slot). class TLSPage { public: - static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE; + static constexpr std::size_t num_slot_entries = + Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE; explicit TLSPage(VAddr address) : base_address{address} {} @@ -76,7 +80,7 @@ public: } is_slot_used[i] = true; - return base_address + (i * Memory::TLS_ENTRY_SIZE); + return base_address + (i * Core::Memory::TLS_ENTRY_SIZE); } return std::nullopt; @@ -86,15 +90,15 @@ public: // Ensure that all given addresses are consistent with how TLS pages // are intended to be used when releasing slots. 
@@ -86,15 +90,15 @@ public:
 // Ensure that all given addresses are consistent with how TLS pages
 // are intended to be used when releasing slots.
 ASSERT(IsWithinPage(address));
- ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0);
+ ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0);
- const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE;
+ const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE;
 is_slot_used[index] = false;
 }
private:
 bool IsWithinPage(VAddr address) const {
- return base_address <= address && address < base_address + Memory::PAGE_SIZE;
+ return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE;
 }
 VAddr base_address;
@@ -106,7 +110,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
 std::shared_ptr<Process> process = std::make_shared<Process>(system);
 process->name = std::move(name);
- process->resource_limit = kernel.GetSystemResourceLimit();
+ process->resource_limit = ResourceLimit::Create(kernel);
 process->status = ProcessStatus::Created;
 process->program_id = 0;
 process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
@@ -127,7 +131,14 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
 }
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
- return vm_manager.GetTotalPhysicalMemoryAvailable();
+ const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
+ page_table->GetTotalHeapSize() + image_size + main_thread_stack_size};
+
+ if (capacity < memory_usage_capacity) {
+ return capacity;
+ }
+
+ return memory_usage_capacity;
 }
 u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
@@ -135,8 +146,7 @@ u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
 }
 u64 Process::GetTotalPhysicalMemoryUsed() const {
- return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size +
- GetSystemResourceUsage();
+ return image_size + main_thread_stack_size + page_table->GetTotalHeapSize();
 }
 u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
@@ -209,33 +219,82 @@ ResultCode Process::ClearSignalState() {
 return RESULT_SUCCESS;
 }
-ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
+ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
+ std::size_t code_size) {
 program_id = metadata.GetTitleID();
 ideal_core = metadata.GetMainThreadCore();
 is_64bit_process = metadata.Is64BitProgram();
 system_resource_size = metadata.GetSystemResourceSize();
+ image_size = code_size;
+
+ // Initialize process address space
+ if (const ResultCode result{
+ page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000,
+ code_size, Memory::MemoryManager::Pool::Application)};
+ result.IsError()) {
+ return result;
+ }
- vm_manager.Reset(metadata.GetAddressSpaceType());
+ // Map process code region
+ if (const ResultCode result{page_table->MapProcessCode(
+ page_table->GetCodeRegionStart(), code_size / Memory::PageSize,
+ Memory::MemoryState::Code, Memory::MemoryPermission::None)};
+ result.IsError()) {
+ return result;
+ }
- const auto& caps = metadata.GetKernelCapabilities();
- const auto capability_init_result =
- capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager);
- if (capability_init_result.IsError()) {
- return capability_init_result;
+ // Initialize process capabilities
+ const auto& caps{metadata.GetKernelCapabilities()};
+ if (const ResultCode result{
+ capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
+ result.IsError()) {
+ return
result; } + // Set memory usage capacity + switch (metadata.GetAddressSpaceType()) { + case FileSys::ProgramAddressSpaceType::Is32Bit: + case FileSys::ProgramAddressSpaceType::Is36Bit: + case FileSys::ProgramAddressSpaceType::Is39Bit: + memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); + break; + + case FileSys::ProgramAddressSpaceType::Is32BitNoMap: + memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + + page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); + break; + + default: + UNREACHABLE(); + } + + // Set initial resource limits + resource_limit->SetLimitValue( + ResourceType::PhysicalMemory, + kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)); + resource_limit->SetLimitValue(ResourceType::Threads, 608); + resource_limit->SetLimitValue(ResourceType::Events, 700); + resource_limit->SetLimitValue(ResourceType::TransferMemory, 128); + resource_limit->SetLimitValue(ResourceType::Sessions, 894); + ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size)); + + // Create TLS region + tls_region_address = CreateTLSRegion(); + return handle_table.SetSize(capabilities.GetHandleTableSize()); } void Process::Run(s32 main_thread_priority, u64 stack_size) { AllocateMainThreadStack(stack_size); - tls_region_address = CreateTLSRegion(); - vm_manager.LogLayout(); + const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size}; + ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError()); ChangeStatus(ProcessStatus::Running); - SetupMainThread(*this, kernel, main_thread_priority); + SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top); + resource_limit->Reserve(ResourceType::Threads, 1); + resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); } void Process::PrepareForTermination() { @@ -279,32 +338,37 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { } VAddr Process::CreateTLSRegion() { - auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages); + if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; + tls_page_iter != tls_pages.cend()) { + return *tls_page_iter->ReserveSlot(); + } - if (tls_page_iter == tls_pages.cend()) { - const auto region_address = - vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(), - vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE); - ASSERT(region_address.Succeeded()); + Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()}; + ASSERT(tls_page_ptr); - const auto map_result = vm_manager.MapMemoryBlock( - *region_address, std::make_shared<PhysicalMemory>(Memory::PAGE_SIZE), 0, - Memory::PAGE_SIZE, MemoryState::ThreadLocal); - ASSERT(map_result.Succeeded()); + const VAddr start{page_table->GetKernelMapRegionStart()}; + const VAddr size{page_table->GetKernelMapRegionEnd() - start}; + const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; + const VAddr tls_page_addr{ + page_table + ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize, + Memory::MemoryState::ThreadLocal, + Memory::MemoryPermission::ReadAndWrite, tls_map_addr) + .ValueOr(0)}; - tls_pages.emplace_back(*region_address); + ASSERT(tls_page_addr); - const auto reserve_result = tls_pages.back().ReserveSlot(); - ASSERT(reserve_result.has_value()); + std::memset(tls_page_ptr, 0, Memory::PageSize); + tls_pages.emplace_back(tls_page_addr); - return *reserve_result; - } + 
const auto reserve_result{tls_pages.back().ReserveSlot()}; + ASSERT(reserve_result.has_value()); - return *tls_page_iter->ReserveSlot(); + return *reserve_result; } void Process::FreeTLSRegion(VAddr tls_address) { - const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE); + const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); auto iter = std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { return page.GetBaseAddress() == aligned_address; @@ -317,28 +381,22 @@ void Process::FreeTLSRegion(VAddr tls_address) { iter->ReleaseSlot(tls_address); } -void Process::LoadModule(CodeSet module_, VAddr base_addr) { - code_memory_size += module_.memory.size(); - - const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory)); - - const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, - MemoryState memory_state) { - const auto vma = vm_manager - .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset, - segment.size, memory_state) - .Unwrap(); - vm_manager.Reprotect(vma, permissions); +void Process::LoadModule(CodeSet code_set, VAddr base_addr) { + const auto ReprotectSegment = [&](const CodeSet::Segment& segment, + Memory::MemoryPermission permission) { + page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); }; - // Map CodeSet segments - MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); - MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); - MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); + system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size()); + + ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute); + ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read); + ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite); } Process::Process(Core::System& system) - : SynchronizationObject{system.Kernel()}, vm_manager{system}, + : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>( + system)}, address_arbiter{system}, mutex{system}, system{system} {} Process::~Process() = default; @@ -361,16 +419,24 @@ void Process::ChangeStatus(ProcessStatus new_status) { Signal(); } -void Process::AllocateMainThreadStack(u64 stack_size) { +ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { + ASSERT(stack_size); + // The kernel always ensures that the given stack size is page aligned. 
- main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
-
- // Allocate and map the main thread stack
- const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
- vm_manager
- .MapMemoryBlock(mapping_address, std::make_shared<PhysicalMemory>(main_thread_stack_size),
- 0, main_thread_stack_size, MemoryState::Stack)
- .Unwrap();
+ main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize);
+
+ const VAddr start{page_table->GetStackRegionStart()};
+ const std::size_t size{page_table->GetStackRegionEnd() - start};
+
+ CASCADE_RESULT(main_thread_stack_top,
+ page_table->AllocateAndMapMemory(
+ main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start,
+ size / Memory::PageSize, Memory::MemoryState::Stack,
+ Memory::MemoryPermission::ReadAndWrite));
+
+ main_thread_stack_top += main_thread_stack_size;
+
+ return RESULT_SUCCESS;
+}
 } // namespace Kernel
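This is why SetupMainThread now receives stack_top as a parameter: AllocateMainThreadStack maps the page-aligned stack somewhere in the stack region and records the address one past the end of the mapping, which becomes the initial stack pointer of a downward-growing AArch64 stack. With illustrative numbers (the mapping base below is hypothetical; in practice AllocateAndMapMemory chooses it):

    // Illustrative only: how main_thread_stack_top falls out of the mapping.
    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    int main() {
        const std::size_t requested = 0x10234;
        // Mirrors Common::AlignUp(requested, PageSize):
        const std::size_t stack_size = (requested + PageSize - 1) & ~(PageSize - 1); // 0x11000
        const VAddr mapping_base = 0x08000000; // hypothetical AllocateAndMapMemory result
        const VAddr stack_top = mapping_base + stack_size; // initial SP; stack grows down
        return stack_top == 0x08011000 ? 0 : 1;
    }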
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 4887132a7..9dabe3568 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -16,7 +16,6 @@
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process_capability.h"
 #include "core/hle/kernel/synchronization_object.h"
-#include "core/hle/kernel/vm_manager.h"
 #include "core/hle/result.h"
 namespace Core {
@@ -36,6 +35,10 @@
 class TLSPage;
 struct CodeSet;
+namespace Memory {
+class PageTable;
+}
+
 enum class MemoryRegion : u16 {
 APPLICATION = 1,
 SYSTEM = 2,
@@ -100,14 +103,14 @@ public:
 return HANDLE_TYPE;
 }
- /// Gets a reference to the process' memory manager.
- Kernel::VMManager& VMManager() {
- return vm_manager;
+ /// Gets a reference to the process' page table.
+ Memory::PageTable& PageTable() {
+ return *page_table;
 }
- /// Gets a const reference to the process' memory manager.
- const Kernel::VMManager& VMManager() const {
- return vm_manager;
+ /// Gets a const reference to the process' page table.
+ const Memory::PageTable& PageTable() const {
+ return *page_table;
 }
 /// Gets a reference to the process' handle table.
@@ -273,7 +276,7 @@ public:
 * @returns RESULT_SUCCESS if all relevant metadata was able to be
 * loaded and parsed. Otherwise, an error code is returned.
 */
- ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata);
+ ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size);
 /**
 * Starts the main application thread for this process.
@@ -289,7 +292,7 @@
 */
 void PrepareForTermination();
- void LoadModule(CodeSet module_, VAddr base_addr);
+ void LoadModule(CodeSet code_set, VAddr base_addr);
 ///////////////////////////////////////////////////////////////////////////////////////////////
 // Thread-local storage management
@@ -313,16 +316,10 @@ private:
 void ChangeStatus(ProcessStatus new_status);
 /// Allocates the main thread stack for the process, given the stack size in bytes.
- void AllocateMainThreadStack(u64 stack_size);
-
- /// Memory manager for this process.
- Kernel::VMManager vm_manager;
-
- /// Size of the main thread's stack in bytes.
- u64 main_thread_stack_size = 0;
+ ResultCode AllocateMainThreadStack(std::size_t stack_size);
- /// Size of the loaded code memory in bytes.
- u64 code_memory_size = 0;
+ /// Page table that backs this process' virtual address space
+ std::unique_ptr<Memory::PageTable> page_table;
 /// Current status of the process
 ProcessStatus status{};
@@ -390,6 +387,18 @@
 /// Name of this process
 std::string name;
+
+ /// Address of the top of the main thread's stack
+ VAddr main_thread_stack_top{};
+
+ /// Size of the main thread's stack
+ std::size_t main_thread_stack_size{};
+
+ /// Memory usage capacity for the process
+ std::size_t memory_usage_capacity{};
+
+ /// Process total image size
+ std::size_t image_size{};
 };
 } // namespace Kernel
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 583e35b79..48e5ae682 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -5,8 +5,8 @@
 #include "common/bit_util.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/process_capability.h"
-#include "core/hle/kernel/vm_manager.h"
 namespace Kernel {
 namespace {
@@ -66,7 +66,7 @@ u32 GetFlagBitOffset(CapabilityType type) {
 ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities,
 std::size_t num_capabilities,
- VMManager& vm_manager) {
+ Memory::PageTable& page_table) {
 Clear();
 // Allow all cores and priorities.
@@ -74,15 +74,15 @@
 priority_mask = 0xFFFFFFFFFFFFFFFF;
 kernel_version = PackedKernelVersion;
- return ParseCapabilities(capabilities, num_capabilities, vm_manager);
+ return ParseCapabilities(capabilities, num_capabilities, page_table);
 }
 ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities,
 std::size_t num_capabilities,
- VMManager& vm_manager) {
+ Memory::PageTable& page_table) {
 Clear();
- return ParseCapabilities(capabilities, num_capabilities, vm_manager);
+ return ParseCapabilities(capabilities, num_capabilities, page_table);
 }
 void ProcessCapabilities::InitializeForMetadatalessProcess() {
@@ -105,7 +105,7 @@
 ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
 std::size_t num_capabilities,
- VMManager& vm_manager) {
+ Memory::PageTable& page_table) {
 u32 set_flags = 0;
 u32 set_svc_bits = 0;
@@ -127,13 +127,13 @@
 return ERR_INVALID_COMBINATION;
 }
- const auto result = HandleMapPhysicalFlags(descriptor, size_flags, vm_manager);
+ const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
 if (result.IsError()) {
 return result;
 }
 } else {
 const auto result =
- ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, vm_manager);
+ ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
 if (result.IsError()) {
 return result;
 }
@@ -144,7 +144,7 @@
 }
 ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits,
- u32 flag, VMManager& vm_manager) {
+ u32 flag, Memory::PageTable& page_table) {
 const auto type = GetCapabilityType(flag);
 if (type == CapabilityType::Unset) {
@@ -172,7 +172,7 @@
 case CapabilityType::Syscall:
 return HandleSyscallFlags(set_svc_bits, flag);
 case CapabilityType::MapIO:
- return HandleMapIOFlags(flag,
vm_manager); + return HandleMapIOFlags(flag, page_table); case CapabilityType::Interrupt: return HandleInterruptFlags(flag); case CapabilityType::ProgramType: @@ -269,12 +269,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) } ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, - VMManager& vm_manager) { + Memory::PageTable& page_table) { // TODO(Lioncache): Implement once the memory manager can handle this. return RESULT_SUCCESS; } -ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, VMManager& vm_manager) { +ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) { // TODO(Lioncache): Implement once the memory manager can handle this. return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h index 5cdd80747..ea9d12c16 100644 --- a/src/core/hle/kernel/process_capability.h +++ b/src/core/hle/kernel/process_capability.h @@ -12,7 +12,9 @@ union ResultCode; namespace Kernel { -class VMManager; +namespace Memory { +class PageTable; +} /// The possible types of programs that may be indicated /// by the program type capability descriptor. @@ -81,27 +83,27 @@ public: /// /// @param capabilities The capabilities to parse /// @param num_capabilities The number of capabilities to parse. - /// @param vm_manager The memory manager to use for handling any mapping-related + /// @param page_table The memory manager to use for handling any mapping-related /// operations (such as mapping IO memory, etc). /// /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, /// otherwise, an error code upon failure. /// ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Initializes this process capabilities instance for a userland process. /// /// @param capabilities The capabilities to parse. /// @param num_capabilities The total number of capabilities to parse. - /// @param vm_manager The memory manager to use for handling any mapping-related + /// @param page_table The memory manager to use for handling any mapping-related /// operations (such as mapping IO memory, etc). /// /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, /// otherwise, an error code upon failure. /// ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Initializes this process capabilities instance for a process that does not /// have any metadata to parse. @@ -181,13 +183,13 @@ private: /// /// @param capabilities The sequence of capability descriptors to parse. /// @param num_capabilities The number of descriptors within the given sequence. - /// @param vm_manager The memory manager that will perform any memory + /// @param page_table The memory manager that will perform any memory /// mapping if necessary. /// /// @return RESULT_SUCCESS if no errors occur, otherwise an error code. /// ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Attempts to parse a capability descriptor that is only represented by a /// single flag set. @@ -196,13 +198,13 @@ private: /// flags being initialized more than once when they shouldn't be. 
/// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask. /// @param flag The flag to attempt to parse. - /// @param vm_manager The memory manager that will perform any memory + /// @param page_table The memory manager that will perform any memory /// mapping if necessary. /// /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code. /// ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Clears the internal state of this process capability instance. Necessary, /// to have a sane starting point due to us allowing running executables without @@ -226,10 +228,10 @@ private: ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags); /// Handles flags related to mapping physical memory pages. - ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, VMManager& vm_manager); + ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table); /// Handles flags related to mapping IO pages. - ResultCode HandleMapIOFlags(u32 flags, VMManager& vm_manager); + ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table); /// Handles flags related to the interrupt capability flags. ResultCode HandleInterruptFlags(u32 flags); diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp index b53423462..96e5b9892 100644 --- a/src/core/hle/kernel/resource_limit.cpp +++ b/src/core/hle/kernel/resource_limit.cpp @@ -16,26 +16,60 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) { ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} ResourceLimit::~ResourceLimit() = default; +bool ResourceLimit::Reserve(ResourceType resource, s64 amount) { + return Reserve(resource, amount, 10000000000); +} + +bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) { + const std::size_t index{ResourceTypeToIndex(resource)}; + + s64 new_value = current[index] + amount; + while (new_value > limit[index] && available[index] + amount <= limit[index]) { + // TODO(bunnei): This is wrong for multicore, we should wait the calling thread for timeout + new_value = current[index] + amount; + + if (timeout >= 0) { + break; + } + } + + if (new_value <= limit[index]) { + current[index] = new_value; + return true; + } + return false; +} + +void ResourceLimit::Release(ResourceType resource, u64 amount) { + Release(resource, amount, amount); +} + +void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) { + const std::size_t index{ResourceTypeToIndex(resource)}; + + current[index] -= used_amount; + available[index] -= available_amount; +} + std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) { return std::make_shared<ResourceLimit>(kernel); } s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { - return values.at(ResourceTypeToIndex(resource)); + return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource)); } s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const { - return limits.at(ResourceTypeToIndex(resource)); + return limit.at(ResourceTypeToIndex(resource)); } ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) { - const auto index = ResourceTypeToIndex(resource); - - if (value < values[index]) { + const std::size_t index{ResourceTypeToIndex(resource)}; + if (current[index] <= value) { + limit[index] = value; + return RESULT_SUCCESS; + 
 } else {
 return ERR_INVALID_STATE;
 }
-
- values[index] = value;
- return RESULT_SUCCESS;
 }
 } // namespace Kernel
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 53b89e621..936cc4d0f 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -51,6 +51,11 @@ public:
 return HANDLE_TYPE;
 }
+ bool Reserve(ResourceType resource, s64 amount);
+ bool Reserve(ResourceType resource, s64 amount, u64 timeout);
+ void Release(ResourceType resource, u64 amount);
+ void Release(ResourceType resource, u64 used_amount, u64 available_amount);
+
 /**
 * Gets the current value for the specified resource.
 * @param resource Requested resource type
@@ -91,10 +96,12 @@ private:
 using ResourceArray =
 std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>;
- /// Maximum values a resource type may reach.
- ResourceArray limits{};
- /// Current resource limit values.
- ResourceArray values{};
+ /// Maximum amount of each resource that may be reserved.
+ ResourceArray limit{};
+ /// Amount of each resource currently reserved.
+ ResourceArray current{};
+ /// Amount of each resource available to be reserved.
+ ResourceArray available{};
 };
 } // namespace Kernel
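Note the inversion in the reworked accounting: Reserve grows current toward limit, Release walks it back, and GetCurrentResourceValue now reports the remaining headroom (limit - current) rather than the amount in use, which is exactly how Process::GetTotalPhysicalMemoryAvailable consumes it earlier in this patch. A toy walkthrough with made-up numbers (illustrative only):

    // Illustrative only: the limit/current arithmetic behind Reserve,
    // Release, and GetCurrentResourceValue in this patch.
    #include <cstdint>

    int main() {
        std::int64_t limit = 0x10000; // SetLimitValue(PhysicalMemory, 0x10000)
        std::int64_t current = 0;

        // Reserve(PhysicalMemory, 0x4000): fits under the limit, so it sticks.
        if (current + 0x4000 <= limit) {
            current += 0x4000;
        }

        // GetCurrentResourceValue reports the remaining headroom:
        const std::int64_t remaining = limit - current; // 0xC000, not 0x4000

        // Release(PhysicalMemory, 0x4000) walks the reservation back down.
        current -= 0x4000;

        return (remaining == 0xC000 && current == 0) ? 0 : 1;
    }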
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4604e35c5..0f102ca44 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -134,7 +134,8 @@
 return RESULT_SUCCESS;
 }
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) {
+ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
+ Core::Memory::Memory& memory) {
 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
 std::shared_ptr<Kernel::HLERequestContext> context{
 std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))};
@@ -178,7 +179,7 @@
 }
 ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
- Memory::Memory& memory) {
+ Core::Memory::Memory& memory) {
 Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {});
 return QueueSyncRequest(std::move(thread), memory);
 }
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index 77e4f6721..403aaf10b 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -13,7 +13,7 @@
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/result.h"
-namespace Memory {
+namespace Core::Memory {
 class Memory;
 }
@@ -92,7 +92,7 @@
 *
 * @returns ResultCode from the operation.
 */
- ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
+ ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
 bool ShouldWait(const Thread* thread) const override;
@@ -126,7 +126,7 @@ public:
 private:
 /// Queues a sync request from the emulated application.
- ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory);
+ ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
 /// Completes a sync request from the emulated application.
 ResultCode CompleteSyncRequest();
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index afb2e3fc2..c67696757 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -2,149 +2,56 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
-#include <utility>
-
 #include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/hle/kernel/errors.h"
+#include "core/core.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/shared_memory.h"
 namespace Kernel {
-SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {}
-SharedMemory::~SharedMemory() = default;
-
-std::shared_ptr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process,
- u64 size, MemoryPermission permissions,
- MemoryPermission other_permissions,
- VAddr address, MemoryRegion region,
- std::string name) {
- std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel);
-
- shared_memory->owner_process = owner_process;
- shared_memory->name = std::move(name);
- shared_memory->size = size;
- shared_memory->permissions = permissions;
- shared_memory->other_permissions = other_permissions;
-
- if (address == 0) {
- shared_memory->backing_block = std::make_shared<Kernel::PhysicalMemory>(size);
- shared_memory->backing_block_offset = 0;
-
- // Refresh the address mappings for the current process.
- if (kernel.CurrentProcess() != nullptr) {
- kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings(
- shared_memory->backing_block.get());
- }
- } else {
- const auto& vm_manager = shared_memory->owner_process->VMManager();
+SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory)
+ : Object{kernel}, device_memory{device_memory} {}
- // The memory is already available and mapped in the owner process.
- const auto vma = vm_manager.FindVMA(address);
- ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address");
- ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
-
- // The returned VMA might be a bigger one encompassing the desired address.
- const auto vma_offset = address - vma->first; - ASSERT_MSG(vma_offset + size <= vma->second.size, - "Shared memory exceeds bounds of mapped block"); - - shared_memory->backing_block = vma->second.backing_block; - shared_memory->backing_block_offset = vma->second.offset + vma_offset; - } - - shared_memory->base_address = address; +SharedMemory::~SharedMemory() = default; - return shared_memory; -} +std::shared_ptr<SharedMemory> SharedMemory::Create( + KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, + Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission, + Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size, + std::string name) { -std::shared_ptr<SharedMemory> SharedMemory::CreateForApplet( - KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, - u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) { - std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel); + std::shared_ptr<SharedMemory> shared_memory{ + std::make_shared<SharedMemory>(kernel, device_memory)}; - shared_memory->owner_process = nullptr; - shared_memory->name = std::move(name); + shared_memory->owner_process = owner_process; + shared_memory->page_list = std::move(page_list); + shared_memory->owner_permission = owner_permission; + shared_memory->user_permission = user_permission; + shared_memory->physical_address = physical_address; shared_memory->size = size; - shared_memory->permissions = permissions; - shared_memory->other_permissions = other_permissions; - shared_memory->backing_block = std::move(heap_block); - shared_memory->backing_block_offset = offset; - shared_memory->base_address = - kernel.CurrentProcess()->VMManager().GetHeapRegionBaseAddress() + offset; + shared_memory->name = name; return shared_memory; } -ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions, - MemoryPermission other_permissions) { - const MemoryPermission own_other_permissions = - &target_process == owner_process ? this->permissions : this->other_permissions; - - // Automatically allocated memory blocks can only be mapped with other_permissions = DontCare - if (base_address == 0 && other_permissions != MemoryPermission::DontCare) { - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - // Error out if the requested permissions don't match what the creator process allows. - if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) { - LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match", - GetObjectId(), address, name); - return ERR_INVALID_MEMORY_PERMISSIONS; - } +ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size, + Memory::MemoryPermission permission) { + const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize}; - // Error out if the provided permissions are not compatible with what the creator process needs. - if (other_permissions != MemoryPermission::DontCare && - static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) { - LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match", - GetObjectId(), address, name); - return ERR_INVALID_MEMORY_PERMISSIONS; + if (page_list.GetNumPages() != page_count) { + UNIMPLEMENTED_MSG("Page count does not match"); } - VAddr target_address = address; + Memory::MemoryPermission expected = + &target_process == owner_process ? 
owner_permission : user_permission; - // Map the memory block into the target process - auto result = target_process.VMManager().MapMemoryBlock( - target_address, backing_block, backing_block_offset, size, MemoryState::Shared); - if (result.Failed()) { - LOG_ERROR( - Kernel, - "cannot map id={}, target_address=0x{:X} name={}, error mapping to virtual memory", - GetObjectId(), target_address, name); - return result.Code(); + if (permission != expected) { + UNIMPLEMENTED_MSG("Permission does not match"); } - return target_process.VMManager().ReprotectRange(target_address, size, - ConvertPermissions(permissions)); -} - -ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) { - if (unmap_size != size) { - LOG_ERROR(Kernel, - "Invalid size passed to Unmap. Size must be equal to the size of the " - "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}", - size, unmap_size); - return ERR_INVALID_SIZE; - } - - // TODO(Subv): Verify what happens if the application tries to unmap an address that is not - // mapped to a SharedMemory. - return target_process.VMManager().UnmapRange(address, size); -} - -VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) { - u32 masked_permissions = - static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute); - return static_cast<VMAPermission>(masked_permissions); -} - -u8* SharedMemory::GetPointer(std::size_t offset) { - return backing_block->data() + backing_block_offset + offset; -} - -const u8* SharedMemory::GetPointer(std::size_t offset) const { - return backing_block->data() + backing_block_offset + offset; + return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared, + permission); } } // namespace Kernel diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 014951d82..cd16d6412 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -8,8 +8,10 @@ #include <string> #include "common/common_types.h" +#include "core/device_memory.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/page_linked_list.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/process.h" #include "core/hle/result.h" @@ -17,63 +19,21 @@ namespace Kernel { class KernelCore; -/// Permissions for mapped shared memory blocks -enum class MemoryPermission : u32 { - None = 0, - Read = (1u << 0), - Write = (1u << 1), - ReadWrite = (Read | Write), - Execute = (1u << 2), - ReadExecute = (Read | Execute), - WriteExecute = (Write | Execute), - ReadWriteExecute = (Read | Write | Execute), - DontCare = (1u << 28) -}; - class SharedMemory final : public Object { public: - explicit SharedMemory(KernelCore& kernel); + explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory); ~SharedMemory() override; - /** - * Creates a shared memory object. - * @param kernel The kernel instance to create a shared memory instance under. - * @param owner_process Process that created this shared memory object. - * @param size Size of the memory block. Must be page-aligned. - * @param permissions Permission restrictions applied to the process which created the block. - * @param other_permissions Permission restrictions applied to other processes mapping the - * block. - * @param address The address from which to map the Shared Memory. 
- * @param region If the address is 0, the shared memory will be allocated in this region of the - * linear heap. - * @param name Optional object name, used for debugging purposes. - */ - static std::shared_ptr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, - u64 size, MemoryPermission permissions, - MemoryPermission other_permissions, - VAddr address = 0, - MemoryRegion region = MemoryRegion::BASE, - std::string name = "Unknown"); - - /** - * Creates a shared memory object from a block of memory managed by an HLE applet. - * @param kernel The kernel instance to create a shared memory instance under. - * @param heap_block Heap block of the HLE applet. - * @param offset The offset into the heap block that the SharedMemory will map. - * @param size Size of the memory block. Must be page-aligned. - * @param permissions Permission restrictions applied to the process which created the block. - * @param other_permissions Permission restrictions applied to other processes mapping the - * block. - * @param name Optional object name, used for debugging purposes. - */ - static std::shared_ptr<SharedMemory> CreateForApplet( - KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, - u64 size, MemoryPermission permissions, MemoryPermission other_permissions, - std::string name = "Unknown Applet"); + static std::shared_ptr<SharedMemory> Create( + KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, + Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission, + Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size, + std::string name); std::string GetTypeName() const override { return "SharedMemory"; } + std::string GetName() const override { return name; } @@ -83,71 +43,42 @@ public: return HANDLE_TYPE; } - /// Gets the size of the underlying memory block in bytes. - u64 GetSize() const { - return size; - } - - /** - * Converts the specified MemoryPermission into the equivalent VMAPermission. - * @param permission The MemoryPermission to convert. - */ - static VMAPermission ConvertPermissions(MemoryPermission permission); - /** * Maps a shared memory block to an address in the target process' address space - * @param target_process Process on which to map the memory block. + * @param target_process Process on which to map the memory block * @param address Address in system memory to map shared memory block to + * @param size Size of the shared memory block to map * @param permissions Memory block map permissions (specified by SVC field) - * @param other_permissions Memory block map other permissions (specified by SVC field) - */ - ResultCode Map(Process& target_process, VAddr address, MemoryPermission permissions, - MemoryPermission other_permissions); - - /** - * Unmaps a shared memory block from the specified address in system memory - * - * @param target_process Process from which to unmap the memory block. - * @param address Address in system memory where the shared memory block is mapped. - * @param unmap_size The amount of bytes to unmap from this shared memory instance. - * - * @return Result code of the unmap operation - * - * @pre The given size to unmap must be the same size as the amount of memory managed by - * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned. 
 */
- ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
+ ResultCode Map(Process& target_process, VAddr address, std::size_t size,
+ Memory::MemoryPermission permission);
 /**
 * Gets a pointer to the shared memory block
 * @param offset Offset from the start of the shared memory block to get pointer
 * @return A pointer to the shared memory block from the specified offset
 */
- u8* GetPointer(std::size_t offset = 0);
+ u8* GetPointer(std::size_t offset = 0) {
+ return device_memory.GetPointer(physical_address + offset);
+ }
 /**
- * Gets a constant pointer to the shared memory block
+ * Gets a const pointer to the shared memory block
 * @param offset Offset from the start of the shared memory block to get pointer
- * @return A constant pointer to the shared memory block from the specified offset
+ * @return A const pointer to the shared memory block from the specified offset
 */
- const u8* GetPointer(std::size_t offset = 0) const;
+ const u8* GetPointer(std::size_t offset = 0) const {
+ return device_memory.GetPointer(physical_address + offset);
+ }
 private:
- /// Backing memory for this shared memory block.
- std::shared_ptr<PhysicalMemory> backing_block;
- /// Offset into the backing block for this shared memory.
- std::size_t backing_block_offset = 0;
- /// Size of the memory block. Page-aligned.
- u64 size = 0;
- /// Permission restrictions applied to the process which created the block.
- MemoryPermission permissions{};
- /// Permission restrictions applied to other processes mapping the block.
- MemoryPermission other_permissions{};
- /// Process that created this shared memory block.
- Process* owner_process;
- /// Address of shared memory block in the owner process if specified.
- VAddr base_address = 0;
- /// Name of shared memory object.
+ Core::DeviceMemory& device_memory;
+ Process* owner_process{};
+ Memory::PageLinkedList page_list;
+ Memory::MemoryPermission owner_permission{};
+ Memory::MemoryPermission user_permission{};
+ PAddr physical_address{};
+ std::size_t size{};
 std::string name;
 };
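The reworked SharedMemory above no longer owns a heap-backed block: it records a physical address into Core::DeviceMemory plus the page list handed to Create, and Map only re-validates what was recorded. Two contracts fall out of the Map implementation earlier in this patch: the mapped size must cover exactly the recorded page count, and the requested permission must equal owner_permission for the owning process or user_permission for everyone else. A standalone restatement of those contracts (the types and enum values below are stand-ins, not the kernel's):

    // Illustrative only: the two contracts SharedMemory::Map enforces.
    #include <cstddef>
    #include <cstdint>

    enum class MemoryPermission : std::uint32_t { None = 0, Read = 1, ReadAndWrite = 3 };

    constexpr std::size_t PageSize = 0x1000;

    struct SharedMemoryContract {
        std::size_t num_pages;             // recorded from page_list at Create time
        MemoryPermission owner_permission; // what the creating process may request
        MemoryPermission user_permission;  // what every other process may request
        bool is_owner_mapping;

        constexpr bool CanMap(std::size_t size, MemoryPermission requested) const {
            // Size must cover exactly the recorded pages (rounded up)...
            const std::size_t page_count = (size + PageSize - 1) / PageSize;
            // ...and the caller must request exactly the permission for its role.
            const MemoryPermission expected =
                is_owner_mapping ? owner_permission : user_permission;
            return page_count == num_pages && requested == expected;
        }
    };

    static_assert(SharedMemoryContract{4, MemoryPermission::ReadAndWrite,
                                       MemoryPermission::Read, false}
                      .CanMap(4 * PageSize, MemoryPermission::Read));
    static_assert(!SharedMemoryContract{4, MemoryPermission::ReadAndWrite,
                                        MemoryPermission::Read, false}
                       .CanMap(4 * PageSize, MemoryPermission::ReadAndWrite));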
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4ffc113c2..4134acf65 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,8 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
@@ -31,6 +33,7 @@
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
@@ -42,7 +45,7 @@
 #include "core/memory.h"
 #include "core/reporter.h"
-namespace Kernel {
+namespace Kernel::Svc {
 namespace {
 // Checks if address + size is greater than the given address
@@ -58,8 +61,8 @@
 constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
 // Helper function that performs the common sanity checks for svcMapMemory
 // and svcUnmapMemory. This is doable, as both functions perform their sanitizing
 // in the same order.
-ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_addr, VAddr src_addr,
- u64 size) {
+ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr dst_addr,
+ VAddr src_addr, u64 size) {
 if (!Common::Is4KBAligned(dst_addr)) {
 LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr);
 return ERR_INVALID_ADDRESS;
@@ -93,36 +96,33 @@
 return ERR_INVALID_ADDRESS_STATE;
 }
- if (!vm_manager.IsWithinAddressSpace(src_addr, size)) {
+ if (!manager.IsInsideAddressSpace(src_addr, size)) {
 LOG_ERROR(Kernel_SVC,
 "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}",
 src_addr, size);
 return ERR_INVALID_ADDRESS_STATE;
 }
- if (!vm_manager.IsWithinStackRegion(dst_addr, size)) {
+ if (manager.IsOutsideStackRegion(dst_addr, size)) {
 LOG_ERROR(Kernel_SVC,
 "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
 dst_addr, size);
 return ERR_INVALID_MEMORY_RANGE;
 }
- const VAddr dst_end_address = dst_addr + size;
- if (dst_end_address > vm_manager.GetHeapRegionBaseAddress() &&
- vm_manager.GetHeapRegionEndAddress() > dst_addr) {
+ if (manager.IsInsideHeapRegion(dst_addr, size)) {
 LOG_ERROR(Kernel_SVC,
 "Destination does not fit within the heap region, addr=0x{:016X}, "
- "size=0x{:016X}, end_addr=0x{:016X}",
- dst_addr, size, dst_end_address);
+ "size=0x{:016X}",
+ dst_addr, size);
 return ERR_INVALID_MEMORY_RANGE;
 }
- if (dst_end_address > vm_manager.GetMapRegionBaseAddress() &&
- vm_manager.GetMapRegionEndAddress() > dst_addr) {
+ if (manager.IsInsideAliasRegion(dst_addr, size)) {
 LOG_ERROR(Kernel_SVC,
 "Destination does not fit within the map region, addr=0x{:016X}, "
- "size=0x{:016X}, end_addr=0x{:016X}",
- dst_addr, size, dst_end_address);
+ "size=0x{:016X}",
+ dst_addr, size);
 return ERR_INVALID_MEMORY_RANGE;
 }
@@ -177,13 +177,10 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_s
 return ERR_INVALID_SIZE;
 }
- auto& vm_manager = system.Kernel().CurrentProcess()->VMManager();
- const auto alloc_result = vm_manager.SetHeapSize(heap_size);
- if (alloc_result.Failed()) {
- return alloc_result.Code();
- }
+ auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+
+ CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));
- *heap_addr = *alloc_result;
 return RESULT_SUCCESS;
 }
@@ -194,63 +191,6 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
 return result;
 }
-static ResultCode SetMemoryPermission(Core::System& system, VAddr addr, u64 size, u32 prot) {
- LOG_TRACE(Kernel_SVC, "called, addr=0x{:X}, size=0x{:X}, prot=0x{:X}", addr, size, prot);
-
- if (!Common::Is4KBAligned(addr)) {
- LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr);
- return ERR_INVALID_ADDRESS;
- }
-
- if (size == 0) {
- LOG_ERROR(Kernel_SVC, "Size is 0");
- return ERR_INVALID_SIZE;
- }
-
- if (!Common::Is4KBAligned(size)) {
- LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size);
- return ERR_INVALID_SIZE;
- }
-
- if (!IsValidAddressRange(addr, size)) {
- LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}",
- addr, size);
- return ERR_INVALID_ADDRESS_STATE;
- }
-
- const auto permission = static_cast<MemoryPermission>(prot);
- if (permission != MemoryPermission::None && permission != MemoryPermission::Read &&
- permission != MemoryPermission::ReadWrite) {
- LOG_ERROR(Kernel_SVC, "Invalid
memory permission specified, Got memory permission=0x{:08X}", - static_cast<u32>(permission)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - auto* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); - - if (!vm_manager.IsWithinAddressSpace(addr, size)) { - LOG_ERROR(Kernel_SVC, - "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, - size); - return ERR_INVALID_ADDRESS_STATE; - } - - const VMManager::VMAHandle iter = vm_manager.FindVMA(addr); - if (!vm_manager.IsValidHandle(iter)) { - LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr); - return ERR_INVALID_ADDRESS_STATE; - } - - LOG_WARNING(Kernel_SVC, "Uniformity check on protected memory is not implemented."); - // TODO: Performs a uniformity check to make sure only protected memory is changed (it doesn't - // make sense to allow changing permissions on kernel memory itself, etc). - - const auto converted_permissions = SharedMemory::ConvertPermissions(permission); - - return vm_manager.ReprotectRange(addr, size, converted_permissions); -} - static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attribute) { LOG_DEBUG(Kernel_SVC, @@ -274,30 +214,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si return ERR_INVALID_ADDRESS_STATE; } - const auto mem_attribute = static_cast<MemoryAttribute>(attribute); - const auto mem_mask = static_cast<MemoryAttribute>(mask); - const auto attribute_with_mask = mem_attribute | mem_mask; - - if (attribute_with_mask != mem_mask) { + const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)}; + if (attributes != static_cast<Memory::MemoryAttribute>(mask) || + (attributes | Memory::MemoryAttribute::Uncached) != Memory::MemoryAttribute::Uncached) { LOG_ERROR(Kernel_SVC, "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}", attribute, mask); return ERR_INVALID_COMBINATION; } - if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) { - LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8)."); - return ERR_INVALID_COMBINATION; - } - - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - if (!vm_manager.IsWithinAddressSpace(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address (0x{:016X}) is outside the bounds of the address space.", address); - return ERR_INVALID_ADDRESS_STATE; - } + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute); + return page_table.SetMemoryAttribute(address, size, static_cast<Memory::MemoryAttribute>(mask), + static_cast<Memory::MemoryAttribute>(attribute)); } /// Maps a memory range into a different range. 
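The folded check in SetMemoryAttribute above packs three requirements into two comparisons: the attribute may only contain bits present in the mask, and neither may touch anything except the uncached attribute (bit 3, value 8 at the SVC boundary, per the message in the removed code). Restated standalone (illustrative only):

    // Illustrative only: the SetMemoryAttribute validity check restated.
    #include <cstdint>

    constexpr std::uint32_t Uncached = 8;

    constexpr bool IsValidAttributeRequest(std::uint32_t mask, std::uint32_t attribute) {
        const std::uint32_t attributes = mask | attribute;
        return attributes == mask &&                // attribute bits are a subset of mask
               (attributes | Uncached) == Uncached; // only the Uncached bit is touched
    }

    static_assert(IsValidAttributeRequest(8, 8));  // set Uncached
    static_assert(IsValidAttributeRequest(8, 0));  // clear Uncached
    static_assert(!IsValidAttributeRequest(0, 8)); // attribute not covered by the mask
    static_assert(!IsValidAttributeRequest(4, 4)); // a bit other than Uncached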
@@ -305,14 +234,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - if (result.IsError()) { + if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)}; + result.IsError()) { return result; } - return vm_manager.MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack); + return page_table.Map(dst_addr, src_addr, size); } /// Unmaps a region that was previously mapped with svcMapMemory @@ -320,21 +249,14 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - if (result.IsError()) { + if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)}; + result.IsError()) { return result; } - const auto unmap_res = vm_manager.UnmapRange(dst_addr, size); - - // Reprotect the source mapping on success - if (unmap_res.IsSuccess()) { - ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess()); - } - - return unmap_res; + return page_table.Unmap(dst_addr, src_addr, size); } /// Connect to an OS service given the port name, returns the handle to the port to out @@ -367,6 +289,8 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, return ERR_NOT_FOUND; } + ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Sessions, 1)); + auto client_port = it->second; std::shared_ptr<ClientSession> client_session; @@ -538,7 +462,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand "requesting_current_thread_handle=0x{:08X}", holding_thread_handle, mutex_addr, requesting_thread_handle); - if (Memory::IsKernelVirtualAddress(mutex_addr)) { + if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", mutex_addr); return ERR_INVALID_ADDRESS_STATE; @@ -558,7 +482,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); - if (Memory::IsKernelVirtualAddress(mutex_addr)) { + if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", mutex_addr); return ERR_INVALID_ADDRESS_STATE; @@ -683,7 +607,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); const auto thread_processor_id = current_thread->GetProcessorID(); system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); - ASSERT(false); system.Kernel().CurrentProcess()->PrepareForTermination(); @@ -785,35 +708,35 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return RESULT_SUCCESS; case 
GetInfoType::MapRegionBaseAddr: - *result = process->VMManager().GetMapRegionBaseAddress(); + *result = process->PageTable().GetAliasRegionStart(); return RESULT_SUCCESS; case GetInfoType::MapRegionSize: - *result = process->VMManager().GetMapRegionSize(); + *result = process->PageTable().GetAliasRegionSize(); return RESULT_SUCCESS; case GetInfoType::HeapRegionBaseAddr: - *result = process->VMManager().GetHeapRegionBaseAddress(); + *result = process->PageTable().GetHeapRegionStart(); return RESULT_SUCCESS; case GetInfoType::HeapRegionSize: - *result = process->VMManager().GetHeapRegionSize(); + *result = process->PageTable().GetHeapRegionSize(); return RESULT_SUCCESS; case GetInfoType::ASLRRegionBaseAddr: - *result = process->VMManager().GetASLRRegionBaseAddress(); + *result = process->PageTable().GetAliasCodeRegionStart(); return RESULT_SUCCESS; case GetInfoType::ASLRRegionSize: - *result = process->VMManager().GetASLRRegionSize(); + *result = process->PageTable().GetAliasCodeRegionSize(); return RESULT_SUCCESS; case GetInfoType::StackRegionBaseAddr: - *result = process->VMManager().GetStackRegionBaseAddress(); + *result = process->PageTable().GetStackRegionStart(); return RESULT_SUCCESS; case GetInfoType::StackRegionSize: - *result = process->VMManager().GetStackRegionSize(); + *result = process->PageTable().GetStackRegionSize(); return RESULT_SUCCESS; case GetInfoType::TotalPhysicalMemoryAvailable: @@ -987,20 +910,29 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) return ERR_INVALID_MEMORY_RANGE; } - Process* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); + Process* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); return ERR_INVALID_STATE; } - if (!vm_manager.IsWithinMapRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Range not within map region"); + if (!page_table.IsInsideAddressSpace(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, + size); + return ERR_INVALID_MEMORY_RANGE; + } + + if (page_table.IsOutsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, + size); return ERR_INVALID_MEMORY_RANGE; } - return vm_manager.MapPhysicalMemory(addr, size); + return page_table.MapPhysicalMemory(addr, size); } /// Unmaps memory previously mapped via MapPhysicalMemory @@ -1027,20 +959,29 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size return ERR_INVALID_MEMORY_RANGE; } - Process* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); + Process* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); return ERR_INVALID_STATE; } - if (!vm_manager.IsWithinMapRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Range not within map region"); + if (!page_table.IsInsideAddressSpace(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, + size); + return ERR_INVALID_MEMORY_RANGE; + } + + if (page_table.IsOutsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within 
the alias region, addr=0x{:016X}, size=0x{:016X}", addr, + size); return ERR_INVALID_MEMORY_RANGE; } - return vm_manager.UnmapPhysicalMemory(addr, size); + return page_table.UnmapPhysicalMemory(addr, size); } /// Sets the thread activity @@ -1197,74 +1138,49 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han return ERR_INVALID_ADDRESS_STATE; } - const auto permissions_type = static_cast<MemoryPermission>(permissions); - if (permissions_type != MemoryPermission::Read && - permissions_type != MemoryPermission::ReadWrite) { + const auto permission_type = static_cast<Memory::MemoryPermission>(permissions); + if ((permission_type | Memory::MemoryPermission::Write) != + Memory::MemoryPermission::ReadAndWrite) { LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}", permissions); return ERR_INVALID_MEMORY_PERMISSIONS; } - auto* const current_process = system.Kernel().CurrentProcess(); - auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle); - if (!shared_memory) { - LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", - shared_memory_handle); - return ERR_INVALID_HANDLE; - } + auto* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; - const auto& vm_manager = current_process->VMManager(); - if (!vm_manager.IsWithinASLRRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}", + if (page_table.IsInvalidRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Addr does not fit within the valid region, addr=0x{:016X}, " + "size=0x{:016X}", addr, size); return ERR_INVALID_MEMORY_RANGE; } - return shared_memory->Map(*current_process, addr, permissions_type, MemoryPermission::DontCare); -} - -static ResultCode UnmapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, - u64 size) { - LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}", - shared_memory_handle, addr, size); - - if (!Common::Is4KBAligned(addr)) { - LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr); - return ERR_INVALID_ADDRESS; - } - - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ERR_INVALID_SIZE; - } - - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size); - return ERR_INVALID_SIZE; + if (page_table.IsInsideHeapRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Addr does not fit within the heap region, addr=0x{:016X}, " + "size=0x{:016X}", + addr, size); + return ERR_INVALID_MEMORY_RANGE; } - if (!IsValidAddressRange(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}", + if (page_table.IsInsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address does not fit within the map region, addr=0x{:016X}, " + "size=0x{:016X}", addr, size); - return ERR_INVALID_ADDRESS_STATE; + return ERR_INVALID_MEMORY_RANGE; } - auto* const current_process = system.Kernel().CurrentProcess(); - auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle); + auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)}; if (!shared_memory) { LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", shared_memory_handle); return ERR_INVALID_HANDLE; } - const auto& vm_manager = current_process->VMManager(); 
-    if (!vm_manager.IsWithinASLRRegion(addr, size)) {
-        LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}",
-                  addr, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return shared_memory->Unmap(*current_process, addr, size);
+    return shared_memory->Map(*current_process, addr, size, permission_type);
 }

 static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
@@ -1279,18 +1195,17 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add
         return ERR_INVALID_HANDLE;
     }

-    auto& memory = system.Memory();
-    const auto& vm_manager = process->VMManager();
-    const MemoryInfo memory_info = vm_manager.QueryMemory(address);
-
-    memory.Write64(memory_info_address, memory_info.base_address);
-    memory.Write64(memory_info_address + 8, memory_info.size);
-    memory.Write32(memory_info_address + 16, memory_info.state);
-    memory.Write32(memory_info_address + 20, memory_info.attributes);
-    memory.Write32(memory_info_address + 24, memory_info.permission);
-    memory.Write32(memory_info_address + 32, memory_info.ipc_ref_count);
-    memory.Write32(memory_info_address + 28, memory_info.device_ref_count);
-    memory.Write32(memory_info_address + 36, 0);
+    auto& memory{system.Memory()};
+    const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()};
+
+    memory.Write64(memory_info_address + 0x00, memory_info.addr);
+    memory.Write64(memory_info_address + 0x08, memory_info.size);
+    memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff);
+    memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr));
+    memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm));
+    memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount);
+    memory.Write32(memory_info_address + 0x20, memory_info.device_refcount);
+    memory.Write32(memory_info_address + 0x24, 0);

     // Page info appears to be currently unused by the kernel and is always set to zero.
     memory.Write32(page_info_address, 0);
@@ -1314,142 +1229,6 @@ static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
     return QueryMemory(system, memory_info_address, page_info_address, query_address);
 }

-static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
-                                       u64 src_address, u64 size) {
-    LOG_DEBUG(Kernel_SVC,
-              "called. 
process_handle=0x{:08X}, dst_address=0x{:016X}, "
-              "src_address=0x{:016X}, size=0x{:016X}",
-              process_handle, dst_address, src_address, size);
-
-    if (!Common::Is4KBAligned(src_address)) {
-        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
-                  src_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (!Common::Is4KBAligned(dst_address)) {
-        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
-                  dst_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0 || !Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!IsValidAddressRange(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!IsValidAddressRange(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range overflows the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    auto process = handle_table.Get<Process>(process_handle);
-    if (!process) {
-        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
-                  process_handle);
-        return ERR_INVALID_HANDLE;
-    }
-
-    auto& vm_manager = process->VMManager();
-    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range is not within the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return vm_manager.MapCodeMemory(dst_address, src_address, size);
-}
-
-static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
-                                         u64 dst_address, u64 src_address, u64 size) {
-    LOG_DEBUG(Kernel_SVC,
-              "called. 
process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
-              "size=0x{:016X}",
-              process_handle, dst_address, src_address, size);
-
-    if (!Common::Is4KBAligned(dst_address)) {
-        LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
-                  dst_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (!Common::Is4KBAligned(src_address)) {
-        LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
-                  src_address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    if (size == 0 || !Common::Is4KBAligned(size)) {
-        LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
-        return ERR_INVALID_SIZE;
-    }
-
-    if (!IsValidAddressRange(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range overflows the address space (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!IsValidAddressRange(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range overflows the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    auto process = handle_table.Get<Process>(process_handle);
-    if (!process) {
-        LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
-                  process_handle);
-        return ERR_INVALID_HANDLE;
-    }
-
-    auto& vm_manager = process->VMManager();
-    if (!vm_manager.IsWithinAddressSpace(src_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Source address range is not within the address space (src_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  src_address, size);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!vm_manager.IsWithinASLRRegion(dst_address, size)) {
-        LOG_ERROR(Kernel_SVC,
-                  "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
-                  "size=0x{:016X}).",
-                  dst_address, size);
-        return ERR_INVALID_MEMORY_RANGE;
-    }
-
-    return vm_manager.UnmapCodeMemory(dst_address, src_address, size);
-}
-
 /// Exits the current process
 static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();
@@ -1506,6 +1285,9 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
     }

     auto& kernel = system.Kernel();
+
+    ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
+
     CASCADE_RESULT(std::shared_ptr<Thread> thread,
                    Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top,
                                   *current_process));
@@ -1610,7 +1392,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);

-    if (Memory::IsKernelVirtualAddress(mutex_addr)) {
+    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
         LOG_ERROR(
             Kernel_SVC,
             "Given mutex address must not be within the kernel address space. address=0x{:016X}",
@@ -1741,7 +1523,7 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
              type, value, timeout);

     // If the passed address is a kernel virtual address, return invalid memory state. 
- if (Memory::IsKernelVirtualAddress(address)) { + if (Core::Memory::IsKernelVirtualAddress(address)) { LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); return ERR_INVALID_ADDRESS_STATE; } @@ -1769,7 +1551,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, address, type, value, num_to_wake); // If the passed address is a kernel virtual address, return invalid memory state. - if (Memory::IsKernelVirtualAddress(address)) { + if (Core::Memory::IsKernelVirtualAddress(address)) { LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); return ERR_INVALID_ADDRESS_STATE; } @@ -1865,9 +1647,9 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd return ERR_INVALID_ADDRESS_STATE; } - const auto perms = static_cast<MemoryPermission>(permissions); - if (perms != MemoryPermission::None && perms != MemoryPermission::Read && - perms != MemoryPermission::ReadWrite) { + const auto perms{static_cast<Memory::MemoryPermission>(permissions)}; + if (perms > Memory::MemoryPermission::ReadAndWrite || + perms == Memory::MemoryPermission::Write) { LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})", permissions); return ERR_INVALID_MEMORY_PERMISSIONS; @@ -1890,111 +1672,6 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd return RESULT_SUCCESS; } -static ResultCode MapTransferMemory(Core::System& system, Handle handle, VAddr address, u64 size, - u32 permission_raw) { - LOG_DEBUG(Kernel_SVC, - "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}", - handle, address, size, permission_raw); - - if (!Common::Is4KBAligned(address)) { - LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", - address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || !Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, - "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", - size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size overflows the 64-bit range (address=0x{:016X}, " - "size=0x{:016X}).", - address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto permissions = static_cast<MemoryPermission>(permission_raw); - if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read && - permissions != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).", - permission_raw); - return ERR_INVALID_STATE; - } - - const auto& kernel = system.Kernel(); - const auto* const current_process = kernel.CurrentProcess(); - const auto& handle_table = current_process->GetHandleTable(); - - auto transfer_memory = handle_table.Get<TransferMemory>(handle); - if (!transfer_memory) { - LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", - handle); - return ERR_INVALID_HANDLE; - } - - if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size don't fully fit within the ASLR region " - "(address=0x{:016X}, size=0x{:016X}).", - address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return transfer_memory->MapMemory(address, size, permissions); -} - -static ResultCode UnmapTransferMemory(Core::System& system, Handle handle, VAddr address, - u64 size) { - LOG_DEBUG(Kernel_SVC, "called. 
handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle, - address, size); - - if (!Common::Is4KBAligned(address)) { - LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", - address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || !Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, - "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", - size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size overflows the 64-bit range (address=0x{:016X}, " - "size=0x{:016X}).", - address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto& kernel = system.Kernel(); - const auto* const current_process = kernel.CurrentProcess(); - const auto& handle_table = current_process->GetHandleTable(); - - auto transfer_memory = handle_table.Get<TransferMemory>(handle); - if (!transfer_memory) { - LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", - handle); - return ERR_INVALID_HANDLE; - } - - if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size don't fully fit within the ASLR region " - "(address=0x{:016X}, size=0x{:016X}).", - address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return transfer_memory->UnmapMemory(address, size); -} - static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, u64* mask) { LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); @@ -2073,52 +1750,6 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, return RESULT_SUCCESS; } -static ResultCode CreateSharedMemory(Core::System& system, Handle* handle, u64 size, - u32 local_permissions, u32 remote_permissions) { - LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size, - local_permissions, remote_permissions); - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ERR_INVALID_SIZE; - } - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size); - return ERR_INVALID_SIZE; - } - - if (size >= MAIN_MEMORY_SIZE) { - LOG_ERROR(Kernel_SVC, "Size is not less than 8GB, 0x{:016X}", size); - return ERR_INVALID_SIZE; - } - - const auto local_perms = static_cast<MemoryPermission>(local_permissions); - if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, - "Invalid local memory permissions, expected Read or ReadWrite but got " - "local_permissions={}", - static_cast<u32>(local_permissions)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - const auto remote_perms = static_cast<MemoryPermission>(remote_permissions); - if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite && - remote_perms != MemoryPermission::DontCare) { - LOG_ERROR(Kernel_SVC, - "Invalid remote memory permissions, expected Read, ReadWrite or DontCare but got " - "remote_permissions={}", - static_cast<u32>(remote_permissions)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - auto& kernel = system.Kernel(); - auto process = kernel.CurrentProcess(); - auto& handle_table = process->GetHandleTable(); - auto shared_mem_handle = SharedMemory::Create(kernel, process, size, local_perms, remote_perms); - - CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); - return RESULT_SUCCESS; -} - static ResultCode CreateEvent(Core::System& system, Handle* 
write_handle, Handle* read_handle) { LOG_DEBUG(Kernel_SVC, "called"); @@ -2305,11 +1936,10 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes, } const auto& kernel = system.Kernel(); - const auto& vm_manager = kernel.CurrentProcess()->VMManager(); const auto total_copy_size = out_process_ids_size * sizeof(u64); - if (out_process_ids_size > 0 && - !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) { + if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace( + out_process_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_process_ids, out_process_ids + total_copy_size); return ERR_INVALID_ADDRESS_STATE; @@ -2345,11 +1975,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd } const auto* const current_process = system.Kernel().CurrentProcess(); - const auto& vm_manager = current_process->VMManager(); const auto total_copy_size = out_thread_ids_size * sizeof(u64); if (out_thread_ids_size > 0 && - !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) { + !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_thread_ids, out_thread_ids + total_copy_size); return ERR_INVALID_ADDRESS_STATE; @@ -2510,7 +2139,7 @@ static const FunctionDef SVC_Table_32[] = { static const FunctionDef SVC_Table_64[] = { {0x00, nullptr, "Unknown"}, {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"}, - {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"}, + {0x02, nullptr, "SetMemoryPermission"}, {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"}, {0x04, SvcWrap64<MapMemory>, "MapMemory"}, {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"}, @@ -2528,7 +2157,7 @@ static const FunctionDef SVC_Table_64[] = { {0x11, SvcWrap64<SignalEvent>, "SignalEvent"}, {0x12, SvcWrap64<ClearEvent>, "ClearEvent"}, {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"}, - {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"}, + {0x14, nullptr, "UnmapSharedMemory"}, {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"}, {0x16, SvcWrap64<CloseHandle>, "CloseHandle"}, {0x17, SvcWrap64<ResetSignal>, "ResetSignal"}, @@ -2588,9 +2217,9 @@ static const FunctionDef SVC_Table_64[] = { {0x4D, nullptr, "SleepSystem"}, {0x4E, nullptr, "ReadWriteRegister"}, {0x4F, nullptr, "SetProcessActivity"}, - {0x50, SvcWrap64<CreateSharedMemory>, "CreateSharedMemory"}, - {0x51, SvcWrap64<MapTransferMemory>, "MapTransferMemory"}, - {0x52, SvcWrap64<UnmapTransferMemory>, "UnmapTransferMemory"}, + {0x50, nullptr, "CreateSharedMemory"}, + {0x51, nullptr, "MapTransferMemory"}, + {0x52, nullptr, "UnmapTransferMemory"}, {0x53, nullptr, "CreateInterruptEvent"}, {0x54, nullptr, "QueryPhysicalAddress"}, {0x55, nullptr, "QueryIoMapping"}, @@ -2627,8 +2256,8 @@ static const FunctionDef SVC_Table_64[] = { {0x74, nullptr, "MapProcessMemory"}, {0x75, nullptr, "UnmapProcessMemory"}, {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, - {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"}, - {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"}, + {0x77, nullptr, "MapProcessCodeMemory"}, + {0x78, nullptr, "UnmapProcessCodeMemory"}, {0x79, nullptr, "CreateProcess"}, {0x7A, nullptr, "StartProcess"}, {0x7B, nullptr, "TerminateProcess"}, @@ -2656,7 +2285,7 @@ static const FunctionDef* GetSVCInfo64(u32 
func_num) { MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); -void CallSVC(Core::System& system, u32 immediate) { +void Call(Core::System& system, u32 immediate) { MICROPROFILE_SCOPE(Kernel_SVC); // Lock the global kernel mutex when we enter the kernel HLE. @@ -2675,4 +2304,4 @@ void CallSVC(Core::System& system, u32 immediate) { } } -} // namespace Kernel +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h index c5539ac1c..46e64277e 100644 --- a/src/core/hle/kernel/svc.h +++ b/src/core/hle/kernel/svc.h @@ -10,8 +10,8 @@ namespace Core { class System; } -namespace Kernel { +namespace Kernel::Svc { -void CallSVC(Core::System& system, u32 immediate); +void Call(Core::System& system, u32 immediate); -} // namespace Kernel +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h new file mode 100644 index 000000000..986724beb --- /dev/null +++ b/src/core/hle/kernel/svc_types.h @@ -0,0 +1,68 @@ +// Copyright 2020 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Svc { + +enum class MemoryState : u32 { + Free = 0x00, + Io = 0x01, + Static = 0x02, + Code = 0x03, + CodeData = 0x04, + Normal = 0x05, + Shared = 0x06, + Alias = 0x07, + AliasCode = 0x08, + AliasCodeData = 0x09, + Ipc = 0x0A, + Stack = 0x0B, + ThreadLocal = 0x0C, + Transfered = 0x0D, + SharedTransfered = 0x0E, + SharedCode = 0x0F, + Inaccessible = 0x10, + NonSecureIpc = 0x11, + NonDeviceIpc = 0x12, + Kernel = 0x13, + GeneratedCode = 0x14, + CodeOut = 0x15, +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryState); + +enum class MemoryAttribute : u32 { + Locked = (1 << 0), + IpcLocked = (1 << 1), + DeviceShared = (1 << 2), + Uncached = (1 << 3), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); + +enum class MemoryPermission : u32 { + None = (0 << 0), + Read = (1 << 0), + Write = (1 << 1), + Execute = (1 << 2), + ReadWrite = Read | Write, + ReadExecute = Read | Execute, + DontCare = (1 << 28), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); + +struct MemoryInfo { + u64 addr{}; + u64 size{}; + MemoryState state{}; + MemoryAttribute attr{}; + MemoryPermission perm{}; + u32 ipc_refcount{}; + u32 device_refcount{}; + u32 padding{}; +}; + +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 83e956036..a919750a6 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -85,6 +85,7 @@ void Thread::ResumeFromWait() { ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); switch (status) { + case ThreadStatus::Paused: case ThreadStatus::WaitSynch: case ThreadStatus::WaitHLEEvent: case ThreadStatus::WaitSleep: @@ -92,6 +93,7 @@ void Thread::ResumeFromWait() { case ThreadStatus::WaitMutex: case ThreadStatus::WaitCondVar: case ThreadStatus::WaitArb: + case ThreadStatus::Dormant: break; case ThreadStatus::Ready: @@ -148,8 +150,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, context.pc = entry_point; context.sp = stack_top; // TODO(merry): Perform a hardware test to determine the below value. 
- // AHP = 0, DN = 1, FTZ = 1, RMode = Round towards zero - context.fpcr = 0x03C00000; + context.fpcr = 0; } ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp index f2d3f8b49..765f408c3 100644 --- a/src/core/hle/kernel/transfer_memory.cpp +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -2,17 +2,16 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/transfer_memory.h" #include "core/hle/result.h" #include "core/memory.h" namespace Kernel { -TransferMemory::TransferMemory(KernelCore& kernel, Memory::Memory& memory) +TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory) : Object{kernel}, memory{memory} {} TransferMemory::~TransferMemory() { @@ -20,14 +19,15 @@ TransferMemory::~TransferMemory() { Reset(); } -std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, Memory::Memory& memory, - VAddr base_address, u64 size, - MemoryPermission permissions) { +std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, + Core::Memory::Memory& memory, + VAddr base_address, std::size_t size, + Memory::MemoryPermission permissions) { std::shared_ptr<TransferMemory> transfer_memory{ std::make_shared<TransferMemory>(kernel, memory)}; transfer_memory->base_address = base_address; - transfer_memory->memory_size = size; + transfer_memory->size = size; transfer_memory->owner_permissions = permissions; transfer_memory->owner_process = kernel.CurrentProcess(); @@ -38,98 +38,12 @@ const u8* TransferMemory::GetPointer() const { return memory.GetPointer(base_address); } -u64 TransferMemory::GetSize() const { - return memory_size; -} - -ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) { - if (memory_size != size) { - return ERR_INVALID_SIZE; - } - - if (owner_permissions != permissions) { - return ERR_INVALID_STATE; - } - - if (is_mapped) { - return ERR_INVALID_STATE; - } - - backing_block = std::make_shared<PhysicalMemory>(size); - - const auto map_state = owner_permissions == MemoryPermission::None - ? 
MemoryState::TransferMemoryIsolated - : MemoryState::TransferMemory; - auto& vm_manager = owner_process->VMManager(); - const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state); - if (map_result.Failed()) { - return map_result.Code(); - } - - is_mapped = true; - return RESULT_SUCCESS; -} - ResultCode TransferMemory::Reserve() { - auto& vm_manager{owner_process->VMManager()}; - const auto check_range_result{vm_manager.CheckRangeState( - base_address, memory_size, MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::All, - VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, - MemoryAttribute::IpcAndDeviceMapped)}; - - if (check_range_result.Failed()) { - return check_range_result.Code(); - } - - auto [state_, permissions_, attribute] = *check_range_result; - - if (const auto result{vm_manager.ReprotectRange( - base_address, memory_size, SharedMemory::ConvertPermissions(owner_permissions))}; - result.IsError()) { - return result; - } - - return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask, - attribute | MemoryAttribute::Locked); + return owner_process->PageTable().ReserveTransferMemory(base_address, size, owner_permissions); } ResultCode TransferMemory::Reset() { - auto& vm_manager{owner_process->VMManager()}; - if (const auto result{vm_manager.CheckRangeState( - base_address, memory_size, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, - MemoryAttribute::IpcAndDeviceMapped)}; - result.Failed()) { - return result.Code(); - } - - if (const auto result{ - vm_manager.ReprotectRange(base_address, memory_size, VMAPermission::ReadWrite)}; - result.IsError()) { - return result; - } - - return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask, - MemoryAttribute::None); -} - -ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) { - if (memory_size != size) { - return ERR_INVALID_SIZE; - } - - auto& vm_manager = owner_process->VMManager(); - const auto result = vm_manager.UnmapRange(address, size); - - if (result.IsError()) { - return result; - } - - is_mapped = false; - return RESULT_SUCCESS; + return owner_process->PageTable().ResetTransferMemory(base_address, size); } } // namespace Kernel diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h index 6e388536a..05e9f7464 100644 --- a/src/core/hle/kernel/transfer_memory.h +++ b/src/core/hle/kernel/transfer_memory.h @@ -6,12 +6,13 @@ #include <memory> +#include "core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/physical_memory.h" union ResultCode; -namespace Memory { +namespace Core::Memory { class Memory; } @@ -20,8 +21,6 @@ namespace Kernel { class KernelCore; class Process; -enum class MemoryPermission : u32; - /// Defines the interface for transfer memory objects. 
/// /// Transfer memory is typically used for the purpose of @@ -30,14 +29,14 @@ enum class MemoryPermission : u32; /// class TransferMemory final : public Object { public: - explicit TransferMemory(KernelCore& kernel, Memory::Memory& memory); + explicit TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory); ~TransferMemory() override; static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; - static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Memory::Memory& memory, - VAddr base_address, u64 size, - MemoryPermission permissions); + static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory, + VAddr base_address, std::size_t size, + Memory::MemoryPermission permissions); TransferMemory(const TransferMemory&) = delete; TransferMemory& operator=(const TransferMemory&) = delete; @@ -61,29 +60,9 @@ public: const u8* GetPointer() const; /// Gets the size of the memory backing this instance in bytes. - u64 GetSize() const; - - /// Attempts to map transfer memory with the given range and memory permissions. - /// - /// @param address The base address to being mapping memory at. - /// @param size The size of the memory to map, in bytes. - /// @param permissions The memory permissions to check against when mapping memory. - /// - /// @pre The given address, size, and memory permissions must all match - /// the same values that were given when creating the transfer memory - /// instance. - /// - ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions); - - /// Unmaps the transfer memory with the given range - /// - /// @param address The base address to begin unmapping memory at. - /// @param size The size of the memory to unmap, in bytes. - /// - /// @pre The given address and size must be the same as the ones used - /// to create the transfer memory instance. - /// - ResultCode UnmapMemory(VAddr address, u64 size); + constexpr std::size_t GetSize() const { + return size; + } /// Reserves the region to be used for the transfer memory, called after the transfer memory is /// created. @@ -94,25 +73,19 @@ public: ResultCode Reset(); private: - /// Memory block backing this instance. - std::shared_ptr<PhysicalMemory> backing_block; - /// The base address for the memory managed by this instance. - VAddr base_address = 0; + VAddr base_address{}; /// Size of the memory, in bytes, that this instance manages. - u64 memory_size = 0; + std::size_t size{}; /// The memory permissions that are applied to this instance. - MemoryPermission owner_permissions{}; + Memory::MemoryPermission owner_permissions{}; /// The process that this transfer memory instance was created under. - Process* owner_process = nullptr; - - /// Whether or not this transfer memory instance has mapped memory. - bool is_mapped = false; + Process* owner_process{}; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp deleted file mode 100644 index 024c22901..000000000 --- a/src/core/hle/kernel/vm_manager.cpp +++ /dev/null @@ -1,1175 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
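(Aside: vm_manager.cpp is removed wholesale below; its role is taken over by Kernel::Memory::PageTable. The SVC rewrites earlier in this diff lean on a handful of region predicates from that new class. The sketch below illustrates their observable behaviour as inferred from the call sites in this diff; the member names match those call sites, but the fields and bodies here are assumptions, not the actual page_table.h implementation.)

#include <cstdint>

using VAddr = std::uint64_t;

// Assumed shape of the region bookkeeping behind Kernel::Memory::PageTable.
struct PageTableRegionsSketch {
    VAddr address_space_start{}, address_space_end{}; // end is exclusive
    VAddr heap_region_start{}, heap_region_end{};
    VAddr alias_region_start{}, alias_region_end{};

    // Whole-range containment: [addr, addr + size) must sit inside the region.
    static constexpr bool Contains(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return start <= addr && addr + size <= end;
    }

    // Overlap test: true when [addr, addr + size) touches the region at all.
    static constexpr bool Overlaps(VAddr start, VAddr end, VAddr addr, std::uint64_t size) {
        return addr < end && start < addr + size;
    }

    constexpr bool IsInsideAddressSpace(VAddr addr, std::uint64_t size) const {
        return Contains(address_space_start, address_space_end, addr, size);
    }
    constexpr bool IsInvalidRegion(VAddr addr, std::uint64_t size) const {
        return !IsInsideAddressSpace(addr, size);
    }
    // The SVCs use these as rejection tests, so overlapping any part of the
    // region is enough for them to return ERR_INVALID_MEMORY_RANGE.
    constexpr bool IsInsideHeapRegion(VAddr addr, std::uint64_t size) const {
        return Overlaps(heap_region_start, heap_region_end, addr, size);
    }
    constexpr bool IsInsideAliasRegion(VAddr addr, std::uint64_t size) const {
        return Overlaps(alias_region_start, alias_region_end, addr, size);
    }
};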
- -#include <algorithm> -#include <cstring> -#include <iterator> -#include <utility> -#include "common/alignment.h" -#include "common/assert.h" -#include "common/logging/log.h" -#include "common/memory_hook.h" -#include "core/core.h" -#include "core/file_sys/program_metadata.h" -#include "core/hle/kernel/errors.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/vm_manager.h" -#include "core/memory.h" - -namespace Kernel { -namespace { -const char* GetMemoryStateName(MemoryState state) { - static constexpr const char* names[] = { - "Unmapped", "Io", - "Normal", "Code", - "CodeData", "Heap", - "Shared", "Unknown1", - "ModuleCode", "ModuleCodeData", - "IpcBuffer0", "Stack", - "ThreadLocal", "TransferMemoryIsolated", - "TransferMemory", "ProcessMemory", - "Inaccessible", "IpcBuffer1", - "IpcBuffer3", "KernelStack", - }; - - return names[ToSvcMemoryState(state)]; -} - -// Checks if a given address range lies within a larger address range. -constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, - VAddr address_range_end) { - const VAddr end_address = address + size - 1; - return address_range_begin <= address && end_address <= address_range_end - 1; -} -} // Anonymous namespace - -bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { - ASSERT(base + size == next.base); - if (permissions != next.permissions || state != next.state || attribute != next.attribute || - type != next.type) { - return false; - } - if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) { - // TODO: Can device mapped memory be merged sanely? - // Not merging it may cause inaccuracies versus hardware when memory layout is queried. - return false; - } - if (type == VMAType::AllocatedMemoryBlock) { - return true; - } - if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) { - return false; - } - if (type == VMAType::MMIO && paddr + size != next.paddr) { - return false; - } - return true; -} - -VMManager::VMManager(Core::System& system) : system{system} { - // Default to assuming a 39-bit address space. This way we have a sane - // starting point with executables that don't provide metadata. - Reset(FileSys::ProgramAddressSpaceType::Is39Bit); -} - -VMManager::~VMManager() = default; - -void VMManager::Reset(FileSys::ProgramAddressSpaceType type) { - Clear(); - - InitializeMemoryRegionRanges(type); - - page_table.Resize(address_space_width); - - // Initialize the map with a single free region covering the entire managed space. - VirtualMemoryArea initial_vma; - initial_vma.size = address_space_end; - vma_map.emplace(initial_vma.base, initial_vma); - - UpdatePageTableForVMA(initial_vma); -} - -VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { - if (target >= address_space_end) { - return vma_map.end(); - } else { - return std::prev(vma_map.upper_bound(target)); - } -} - -bool VMManager::IsValidHandle(VMAHandle handle) const { - return handle != vma_map.cend(); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, - std::shared_ptr<PhysicalMemory> block, - std::size_t offset, u64 size, - MemoryState state, VMAPermission perm) { - ASSERT(block != nullptr); - ASSERT(offset + size <= block->size()); - - // This is the appropriately sized VMA that will turn into our allocation. 
- CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::AllocatedMemoryBlock; - final_vma.permissions = perm; - final_vma.state = state; - final_vma.backing_block = std::move(block); - final_vma.offset = offset; - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size, - MemoryState state) { - ASSERT(memory != nullptr); - - // This is the appropriately sized VMA that will turn into our allocation. - CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::BackingMemory; - final_vma.permissions = VMAPermission::ReadWrite; - final_vma.state = state; - final_vma.backing_memory = memory; - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const { - return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size); -} - -ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const { - ASSERT(begin < end); - ASSERT(size <= end - begin); - - const VMAHandle vma_handle = - std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) { - if (vma.second.type != VMAType::Free) { - return false; - } - const VAddr vma_base = vma.second.base; - const VAddr vma_end = vma_base + vma.second.size; - const VAddr assumed_base = (begin < vma_base) ? vma_base : begin; - const VAddr used_range = assumed_base + size; - - return vma_base <= assumed_base && assumed_base < used_range && used_range < end && - used_range <= vma_end; - }); - - if (vma_handle == vma_map.cend()) { - // TODO(Subv): Find the correct error code here. - return RESULT_UNKNOWN; - } - - const VAddr target = std::max(begin, vma_handle->second.base); - return MakeResult<VAddr>(target); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, - MemoryState state, - Common::MemoryHookPointer mmio_handler) { - // This is the appropriately sized VMA that will turn into our allocation. 
- CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::MMIO; - final_vma.permissions = VMAPermission::ReadWrite; - final_vma.state = state; - final_vma.paddr = paddr; - final_vma.mmio_handler = std::move(mmio_handler); - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) { - VirtualMemoryArea& vma = vma_handle->second; - vma.type = VMAType::Free; - vma.permissions = VMAPermission::None; - vma.state = MemoryState::Unmapped; - vma.attribute = MemoryAttribute::None; - - vma.backing_block = nullptr; - vma.offset = 0; - vma.backing_memory = nullptr; - vma.paddr = 0; - - UpdatePageTableForVMA(vma); - - return MergeAdjacent(vma_handle); -} - -ResultCode VMManager::UnmapRange(VAddr target, u64 size) { - CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); - const VAddr target_end = target + size; - - const VMAIter end = vma_map.end(); - // The comparison against the end of the range must be done using addresses since VMAs can be - // merged during this process, causing invalidation of the iterators. - while (vma != end && vma->second.base < target_end) { - vma = std::next(Unmap(vma)); - } - - ASSERT(FindVMA(target)->second.size >= size); - - return RESULT_SUCCESS; -} - -VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) { - VMAIter iter = StripIterConstness(vma_handle); - - VirtualMemoryArea& vma = iter->second; - vma.permissions = new_perms; - UpdatePageTableForVMA(vma); - - return MergeAdjacent(iter); -} - -ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) { - CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); - const VAddr target_end = target + size; - - const VMAIter end = vma_map.end(); - // The comparison against the end of the range must be done using addresses since VMAs can be - // merged during this process, causing invalidation of the iterators. - while (vma != end && vma->second.base < target_end) { - vma = std::next(StripIterConstness(Reprotect(vma, new_perms))); - } - - return RESULT_SUCCESS; -} - -ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { - if (size > GetHeapRegionSize()) { - return ERR_OUT_OF_MEMORY; - } - - // No need to do any additional work if the heap is already the given size. - if (size == GetCurrentHeapSize()) { - return MakeResult(heap_region_base); - } - - if (heap_memory == nullptr) { - // Initialize heap - heap_memory = std::make_shared<PhysicalMemory>(size); - heap_end = heap_region_base + size; - } else { - UnmapRange(heap_region_base, GetCurrentHeapSize()); - } - - // If necessary, expand backing vector to cover new heap extents in - // the case of allocating. Otherwise, shrink the backing memory, - // if a smaller heap has been requested. - heap_memory->resize(size); - heap_memory->shrink_to_fit(); - RefreshMemoryBlockMappings(heap_memory.get()); - - heap_end = heap_region_base + size; - ASSERT(GetCurrentHeapSize() == heap_memory->size()); - - const auto mapping_result = - MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap); - if (mapping_result.Failed()) { - return mapping_result.Code(); - } - - return MakeResult<VAddr>(heap_region_base); -} - -ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) { - // Check how much memory we've already mapped. 
- const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size); - if (mapped_size_result.Failed()) { - return mapped_size_result.Code(); - } - - // If we've already mapped the desired amount, return early. - const std::size_t mapped_size = *mapped_size_result; - if (mapped_size == size) { - return RESULT_SUCCESS; - } - - // Check that we can map the memory we want. - const auto res_limit = system.CurrentProcess()->GetResourceLimit(); - const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) - - res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory); - if (physmem_remaining < (size - mapped_size)) { - return ERR_RESOURCE_LIMIT_EXCEEDED; - } - - // Keep track of the memory regions we unmap. - std::vector<std::pair<u64, u64>> mapped_regions; - ResultCode result = RESULT_SUCCESS; - - // Iterate, trying to map memory. - { - const auto end_addr = target + size; - const auto last_addr = end_addr - 1; - VAddr cur_addr = target; - - auto iter = FindVMA(target); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - - // Map the memory block - const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr); - if (vma.state == MemoryState::Unmapped) { - const auto map_res = - MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0, - map_size, MemoryState::Heap, VMAPermission::ReadWrite); - result = map_res.Code(); - if (result.IsError()) { - break; - } - - mapped_regions.emplace_back(cur_addr, map_size); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - } - } - - // If we failed, unmap memory. - if (result.IsError()) { - for (const auto [unmap_address, unmap_size] : mapped_regions) { - ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(), - "Failed to unmap memory range."); - } - - return result; - } - - // Update amount of mapped physical memory. - physical_memory_mapped += size - mapped_size; - - return RESULT_SUCCESS; -} - -ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) { - // Check how much memory is currently mapped. - const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size); - if (mapped_size_result.Failed()) { - return mapped_size_result.Code(); - } - - // If we've already unmapped all the memory, return early. - const std::size_t mapped_size = *mapped_size_result; - if (mapped_size == 0) { - return RESULT_SUCCESS; - } - - // Keep track of the memory regions we unmap. - std::vector<std::pair<u64, u64>> unmapped_regions; - ResultCode result = RESULT_SUCCESS; - - // Try to unmap regions. 
- { - const auto end_addr = target + size; - const auto last_addr = end_addr - 1; - VAddr cur_addr = target; - - auto iter = FindVMA(target); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - - // Unmap the memory block - const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr); - if (vma.state == MemoryState::Heap) { - result = UnmapRange(cur_addr, unmap_size); - if (result.IsError()) { - break; - } - - unmapped_regions.emplace_back(cur_addr, unmap_size); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - } - } - - // If we failed, re-map regions. - // TODO: Preserve memory contents? - if (result.IsError()) { - for (const auto [map_address, map_size] : unmapped_regions) { - const auto remap_res = - MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0, map_size, - MemoryState::Heap, VMAPermission::None); - ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block."); - } - - return result; - } - - // Update mapped amount - physical_memory_mapped -= mapped_size; - - return RESULT_SUCCESS; -} - -ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { - constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; - const auto src_check_result = CheckRangeState( - src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All, - VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (src_check_result.Failed()) { - return src_check_result.Code(); - } - - const auto mirror_result = - MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode); - if (mirror_result.IsError()) { - return mirror_result; - } - - // Ensure we lock the source memory region. - const auto src_vma_result = CarveVMARange(src_address, size); - if (src_vma_result.Failed()) { - return src_vma_result.Code(); - } - auto src_vma_iter = *src_vma_result; - src_vma_iter->second.attribute = MemoryAttribute::Locked; - Reprotect(src_vma_iter, VMAPermission::Read); - - // The destination memory region is fine as is, however we need to make it read-only. - return ReprotectRange(dst_address, size, VMAPermission::Read); -} - -ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { - constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; - const auto src_check_result = CheckRangeState( - src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute); - - if (src_check_result.Failed()) { - return src_check_result.Code(); - } - - // Yes, the kernel only checks the first page of the region. 
- const auto dst_check_result = - CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule, - MemoryState::FlagModule, VMAPermission::None, VMAPermission::None, - MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (dst_check_result.Failed()) { - return dst_check_result.Code(); - } - - const auto dst_memory_state = std::get<MemoryState>(*dst_check_result); - const auto dst_contiguous_check_result = CheckRangeState( - dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (dst_contiguous_check_result.Failed()) { - return dst_contiguous_check_result.Code(); - } - - const auto unmap_result = UnmapRange(dst_address, size); - if (unmap_result.IsError()) { - return unmap_result; - } - - // With the mirrored portion unmapped, restore the original region's traits. - const auto src_vma_result = CarveVMARange(src_address, size); - if (src_vma_result.Failed()) { - return src_vma_result.Code(); - } - auto src_vma_iter = *src_vma_result; - src_vma_iter->second.state = MemoryState::Heap; - src_vma_iter->second.attribute = MemoryAttribute::None; - Reprotect(src_vma_iter, VMAPermission::ReadWrite); - - if (dst_memory_state == MemoryState::ModuleCode) { - system.InvalidateCpuInstructionCaches(); - } - - return unmap_result; -} - -MemoryInfo VMManager::QueryMemory(VAddr address) const { - const auto vma = FindVMA(address); - MemoryInfo memory_info{}; - - if (IsValidHandle(vma)) { - memory_info.base_address = vma->second.base; - memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute); - memory_info.permission = static_cast<u32>(vma->second.permissions); - memory_info.size = vma->second.size; - memory_info.state = ToSvcMemoryState(vma->second.state); - } else { - memory_info.base_address = address_space_end; - memory_info.permission = static_cast<u32>(VMAPermission::None); - memory_info.size = 0 - address_space_end; - memory_info.state = static_cast<u32>(MemoryState::Inaccessible); - } - - return memory_info; -} - -ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, - MemoryAttribute attribute) { - constexpr auto ignore_mask = - MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped | MemoryAttribute::Locked; - constexpr auto attribute_mask = ~ignore_mask; - - const auto result = CheckRangeState( - address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None, - VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask); - - if (result.Failed()) { - return result.Code(); - } - - const auto [prev_state, prev_permissions, prev_attributes] = *result; - const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute); - - const auto carve_result = CarveVMARange(address, size); - if (carve_result.Failed()) { - return carve_result.Code(); - } - - auto vma_iter = *carve_result; - vma_iter->second.attribute = new_attribute; - - MergeAdjacent(vma_iter); - return RESULT_SUCCESS; -} - -ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) { - const auto vma = FindVMA(src_addr); - - ASSERT_MSG(vma != vma_map.end(), "Invalid memory address"); - ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address"); - - // The returned VMA might be a bigger one encompassing the desired address. 
- const auto vma_offset = src_addr - vma->first; - ASSERT_MSG(vma_offset + size <= vma->second.size, - "Shared memory exceeds bounds of mapped block"); - - const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block; - const std::size_t backing_block_offset = vma->second.offset + vma_offset; - - CASCADE_RESULT(auto new_vma, - MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, state)); - // Protect mirror with permissions from old region - Reprotect(new_vma, vma->second.permissions); - // Remove permissions from old region - ReprotectRange(src_addr, size, VMAPermission::None); - - return RESULT_SUCCESS; -} - -void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) { - // If this ever proves to have a noticeable performance impact, allow users of the function to - // specify a specific range of addresses to limit the scan to. - for (const auto& p : vma_map) { - const VirtualMemoryArea& vma = p.second; - if (block == vma.backing_block.get()) { - UpdatePageTableForVMA(vma); - } - } -} - -void VMManager::LogLayout() const { - for (const auto& p : vma_map) { - const VirtualMemoryArea& vma = p.second; - LOG_DEBUG(Kernel, "{:016X} - {:016X} size: {:016X} {}{}{} {}", vma.base, - vma.base + vma.size, vma.size, - (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-', - (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-', - (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', - GetMemoryStateName(vma.state)); - } -} - -VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) { - // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given - // non-const access to its container. - return vma_map.erase(iter, iter); // Erases an empty range of elements -} - -ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) { - ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size); - ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", base); - - VMAIter vma_handle = StripIterConstness(FindVMA(base)); - if (vma_handle == vma_map.end()) { - // Target address is outside the range managed by the kernel - return ERR_INVALID_ADDRESS; - } - - const VirtualMemoryArea& vma = vma_handle->second; - if (vma.type != VMAType::Free) { - // Region is already allocated - return ERR_INVALID_ADDRESS_STATE; - } - - const VAddr start_in_vma = base - vma.base; - const VAddr end_in_vma = start_in_vma + size; - - if (end_in_vma > vma.size) { - // Requested allocation doesn't fit inside VMA - return ERR_INVALID_ADDRESS_STATE; - } - - if (end_in_vma != vma.size) { - // Split VMA at the end of the allocated region - SplitVMA(vma_handle, end_in_vma); - } - if (start_in_vma != 0) { - // Split VMA at the start of the allocated region - vma_handle = SplitVMA(vma_handle, start_in_vma); - } - - return MakeResult<VMAIter>(vma_handle); -} - -ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) { - ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size); - ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target); - - const VAddr target_end = target + size; - ASSERT(target_end >= target); - ASSERT(target_end <= address_space_end); - ASSERT(size > 0); - - VMAIter begin_vma = StripIterConstness(FindVMA(target)); - const VMAIter i_end = vma_map.lower_bound(target_end); - if (std::any_of(begin_vma, i_end, - [](const auto& entry) { return entry.second.type == VMAType::Free; })) { - 
return ERR_INVALID_ADDRESS_STATE; - } - - if (target != begin_vma->second.base) { - begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base); - } - - VMAIter end_vma = StripIterConstness(FindVMA(target_end)); - if (end_vma != vma_map.end() && target_end != end_vma->second.base) { - end_vma = SplitVMA(end_vma, target_end - end_vma->second.base); - } - - return MakeResult<VMAIter>(begin_vma); -} - -VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { - VirtualMemoryArea& old_vma = vma_handle->second; - VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA - - // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably - // a bug. This restriction might be removed later. - ASSERT(offset_in_vma < old_vma.size); - ASSERT(offset_in_vma > 0); - - old_vma.size = offset_in_vma; - new_vma.base += offset_in_vma; - new_vma.size -= offset_in_vma; - - switch (new_vma.type) { - case VMAType::Free: - break; - case VMAType::AllocatedMemoryBlock: - new_vma.offset += offset_in_vma; - break; - case VMAType::BackingMemory: - new_vma.backing_memory += offset_in_vma; - break; - case VMAType::MMIO: - new_vma.paddr += offset_in_vma; - break; - } - - ASSERT(old_vma.CanBeMergedWith(new_vma)); - - return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma); -} - -VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { - const VMAIter next_vma = std::next(iter); - if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { - MergeAdjacentVMA(iter->second, next_vma->second); - vma_map.erase(next_vma); - } - - if (iter != vma_map.begin()) { - VMAIter prev_vma = std::prev(iter); - if (prev_vma->second.CanBeMergedWith(iter->second)) { - MergeAdjacentVMA(prev_vma->second, iter->second); - vma_map.erase(iter); - iter = prev_vma; - } - } - - return iter; -} - -void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) { - ASSERT(left.CanBeMergedWith(right)); - - // Always merge allocated memory blocks, even when they don't share the same backing block. - if (left.type == VMAType::AllocatedMemoryBlock && - (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) { - - // Check if we can save work. - if (left.offset == 0 && left.size == left.backing_block->size()) { - // Fast case: left is an entire backing block. - left.backing_block->resize(left.size + right.size); - std::memcpy(left.backing_block->data() + left.size, - right.backing_block->data() + right.offset, right.size); - } else { - // Slow case: make a new memory block for left and right. - auto new_memory = std::make_shared<PhysicalMemory>(); - new_memory->resize(left.size + right.size); - std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size); - std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset, - right.size); - - left.backing_block = std::move(new_memory); - left.offset = 0; - } - - // Page table update is needed, because backing memory changed. - left.size += right.size; - UpdatePageTableForVMA(left); - } else { - // Just update the size. 
- left.size += right.size; - } -} - -void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { - auto& memory = system.Memory(); - - switch (vma.type) { - case VMAType::Free: - memory.UnmapRegion(page_table, vma.base, vma.size); - break; - case VMAType::AllocatedMemoryBlock: - memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset); - break; - case VMAType::BackingMemory: - memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory); - break; - case VMAType::MMIO: - memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler); - break; - } -} - -void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) { - u64 map_region_size = 0; - u64 heap_region_size = 0; - u64 stack_region_size = 0; - u64 tls_io_region_size = 0; - - u64 stack_and_tls_io_end = 0; - - switch (type) { - case FileSys::ProgramAddressSpaceType::Is32Bit: - case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - address_space_width = 32; - code_region_base = 0x200000; - code_region_end = code_region_base + 0x3FE00000; - aslr_region_base = 0x200000; - aslr_region_end = aslr_region_base + 0xFFE00000; - if (type == FileSys::ProgramAddressSpaceType::Is32Bit) { - map_region_size = 0x40000000; - heap_region_size = 0x40000000; - } else { - map_region_size = 0; - heap_region_size = 0x80000000; - } - stack_and_tls_io_end = 0x40000000; - break; - case FileSys::ProgramAddressSpaceType::Is36Bit: - address_space_width = 36; - code_region_base = 0x8000000; - code_region_end = code_region_base + 0x78000000; - aslr_region_base = 0x8000000; - aslr_region_end = aslr_region_base + 0xFF8000000; - map_region_size = 0x180000000; - heap_region_size = 0x180000000; - stack_and_tls_io_end = 0x80000000; - break; - case FileSys::ProgramAddressSpaceType::Is39Bit: - address_space_width = 39; - code_region_base = 0x8000000; - code_region_end = code_region_base + 0x80000000; - aslr_region_base = 0x8000000; - aslr_region_end = aslr_region_base + 0x7FF8000000; - map_region_size = 0x1000000000; - heap_region_size = 0x180000000; - stack_region_size = 0x80000000; - tls_io_region_size = 0x1000000000; - break; - default: - UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type)); - return; - } - - const u64 stack_and_tls_io_begin = aslr_region_base; - - address_space_base = 0; - address_space_end = 1ULL << address_space_width; - - map_region_base = code_region_end; - map_region_end = map_region_base + map_region_size; - - heap_region_base = map_region_end; - heap_region_end = heap_region_base + heap_region_size; - heap_end = heap_region_base; - - stack_region_base = heap_region_end; - stack_region_end = stack_region_base + stack_region_size; - - tls_io_region_base = stack_region_end; - tls_io_region_end = tls_io_region_base + tls_io_region_size; - - if (stack_region_size == 0) { - stack_region_base = stack_and_tls_io_begin; - stack_region_end = stack_and_tls_io_end; - } - - if (tls_io_region_size == 0) { - tls_io_region_base = stack_and_tls_io_begin; - tls_io_region_end = stack_and_tls_io_end; - } -} - -void VMManager::Clear() { - ClearVMAMap(); - ClearPageTable(); -} - -void VMManager::ClearVMAMap() { - vma_map.clear(); -} - -void VMManager::ClearPageTable() { - std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); - page_table.special_regions.clear(); - std::fill(page_table.attributes.begin(), page_table.attributes.end(), - Common::PageType::Unmapped); -} - -VMManager::CheckResults VMManager::CheckRangeState(VAddr address, 
u64 size, MemoryState state_mask, - MemoryState state, VMAPermission permission_mask, - VMAPermission permissions, - MemoryAttribute attribute_mask, - MemoryAttribute attribute, - MemoryAttribute ignore_mask) const { - auto iter = FindVMA(address); - - // If we don't have a valid VMA handle at this point, then it means this is - // being called with an address outside of the address space, which is definitely - // indicative of a bug, as this function only operates on mapped memory regions. - DEBUG_ASSERT(IsValidHandle(iter)); - - const VAddr end_address = address + size - 1; - const MemoryAttribute initial_attributes = iter->second.attribute; - const VMAPermission initial_permissions = iter->second.permissions; - const MemoryState initial_state = iter->second.state; - - while (true) { - // The iterator should be valid throughout the traversal. Hitting the end of - // the mapped VMA regions is unquestionably indicative of a bug. - DEBUG_ASSERT(IsValidHandle(iter)); - - const auto& vma = iter->second; - - if (vma.state != initial_state) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.state & state_mask) != state) { - return ERR_INVALID_ADDRESS_STATE; - } - - if (vma.permissions != initial_permissions) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.permissions & permission_mask) != permissions) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.attribute & attribute_mask) != attribute) { - return ERR_INVALID_ADDRESS_STATE; - } - - if (end_address <= vma.EndAddress()) { - break; - } - - ++iter; - } - - return MakeResult( - std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask)); -} - -ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address, - std::size_t size) const { - const VAddr end_addr = address + size; - const VAddr last_addr = end_addr - 1; - std::size_t mapped_size = 0; - - VAddr cur_addr = address; - auto iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const VAddr vma_start = vma.base; - const VAddr vma_end = vma_start + vma.size; - const VAddr vma_last = vma_end - 1; - - // Add size if relevant. - if (vma.state != MemoryState::Unmapped) { - mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = std::next(iter); - ASSERT(iter != vma_map.end()); - } - - return MakeResult(mapped_size); -} - -ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address, - std::size_t size) const { - const VAddr end_addr = address + size; - const VAddr last_addr = end_addr - 1; - std::size_t mapped_size = 0; - - VAddr cur_addr = address; - auto iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - const auto state = vma.state; - const auto attr = vma.attribute; - - // Memory within region must be free or mapped heap. - if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) || - (state == MemoryState::Unmapped))) { - return ERR_INVALID_ADDRESS_STATE; - } - - // Add size if relevant. 
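// std::min(end_addr - cur_addr, vma_end - cur_addr) below is
// min(end_addr, vma_end) - cur_addr: only the portion of this VMA that
// overlaps the queried window [address, address + size) is counted.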
- if (state != MemoryState::Unmapped) { - mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = std::next(iter); - ASSERT(iter != vma_map.end()); - } - - return MakeResult(mapped_size); -} - -u64 VMManager::GetTotalPhysicalMemoryAvailable() const { - LOG_WARNING(Kernel, "(STUBBED) called"); - return 0xF8000000; -} - -VAddr VMManager::GetAddressSpaceBaseAddress() const { - return address_space_base; -} - -VAddr VMManager::GetAddressSpaceEndAddress() const { - return address_space_end; -} - -u64 VMManager::GetAddressSpaceSize() const { - return address_space_end - address_space_base; -} - -u64 VMManager::GetAddressSpaceWidth() const { - return address_space_width; -} - -bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(), - GetAddressSpaceEndAddress()); -} - -VAddr VMManager::GetASLRRegionBaseAddress() const { - return aslr_region_base; -} - -VAddr VMManager::GetASLRRegionEndAddress() const { - return aslr_region_end; -} - -u64 VMManager::GetASLRRegionSize() const { - return aslr_region_end - aslr_region_base; -} - -bool VMManager::IsWithinASLRRegion(VAddr begin, u64 size) const { - const VAddr range_end = begin + size; - const VAddr aslr_start = GetASLRRegionBaseAddress(); - const VAddr aslr_end = GetASLRRegionEndAddress(); - - if (aslr_start > begin || begin > range_end || range_end - 1 > aslr_end - 1) { - return false; - } - - if (range_end > heap_region_base && heap_region_end > begin) { - return false; - } - - if (range_end > map_region_base && map_region_end > begin) { - return false; - } - - return true; -} - -VAddr VMManager::GetCodeRegionBaseAddress() const { - return code_region_base; -} - -VAddr VMManager::GetCodeRegionEndAddress() const { - return code_region_end; -} - -u64 VMManager::GetCodeRegionSize() const { - return code_region_end - code_region_base; -} - -bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(), - GetCodeRegionEndAddress()); -} - -VAddr VMManager::GetHeapRegionBaseAddress() const { - return heap_region_base; -} - -VAddr VMManager::GetHeapRegionEndAddress() const { - return heap_region_end; -} - -u64 VMManager::GetHeapRegionSize() const { - return heap_region_end - heap_region_base; -} - -u64 VMManager::GetCurrentHeapSize() const { - return heap_end - heap_region_base; -} - -bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), - GetHeapRegionEndAddress()); -} - -VAddr VMManager::GetMapRegionBaseAddress() const { - return map_region_base; -} - -VAddr VMManager::GetMapRegionEndAddress() const { - return map_region_end; -} - -u64 VMManager::GetMapRegionSize() const { - return map_region_end - map_region_base; -} - -bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress()); -} - -VAddr VMManager::GetStackRegionBaseAddress() const { - return stack_region_base; -} - -VAddr VMManager::GetStackRegionEndAddress() const { - return stack_region_end; -} - -u64 VMManager::GetStackRegionSize() const { - return stack_region_end - stack_region_base; -} - -bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const { - return 
IsInsideAddressRange(address, size, GetStackRegionBaseAddress(), - GetStackRegionEndAddress()); -} - -VAddr VMManager::GetTLSIORegionBaseAddress() const { - return tls_io_region_base; -} - -VAddr VMManager::GetTLSIORegionEndAddress() const { - return tls_io_region_end; -} - -u64 VMManager::GetTLSIORegionSize() const { - return tls_io_region_end - tls_io_region_base; -} - -bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(), - GetTLSIORegionEndAddress()); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h deleted file mode 100644 index 90b4b006a..000000000 --- a/src/core/hle/kernel/vm_manager.h +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <map> -#include <memory> -#include <tuple> -#include <vector> -#include "common/common_types.h" -#include "common/memory_hook.h" -#include "common/page_table.h" -#include "core/hle/kernel/physical_memory.h" -#include "core/hle/result.h" -#include "core/memory.h" - -namespace Core { -class System; -} - -namespace FileSys { -enum class ProgramAddressSpaceType : u8; -} - -namespace Kernel { - -enum class VMAType : u8 { - /// VMA represents an unmapped region of the address space. - Free, - /// VMA is backed by a ref-counted allocated memory block. - AllocatedMemoryBlock, - /// VMA is backed by a raw, unmanaged pointer. - BackingMemory, - /// VMA is mapped to MMIO registers at a fixed PAddr. - MMIO, - // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP -}; - -/// Permissions for mapped memory blocks -enum class VMAPermission : u8 { - None = 0, - Read = 1, - Write = 2, - Execute = 4, - - ReadWrite = Read | Write, - ReadExecute = Read | Execute, - WriteExecute = Write | Execute, - ReadWriteExecute = Read | Write | Execute, - - // Used as a wildcard when checking permissions across memory ranges - All = 0xFF, -}; - -constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) | u32(rhs)); -} - -constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) & u32(rhs)); -} - -constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs)); -} - -constexpr VMAPermission operator~(VMAPermission permission) { - return static_cast<VMAPermission>(~u32(permission)); -} - -constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -/// Attribute flags that can be applied to a VMA -enum class MemoryAttribute : u32 { - Mask = 0xFF, - - /// No particular qualities - None = 0, - /// Memory locked/borrowed for use, e.g. by transfer memory. - Locked = 1, - /// Memory locked for use by IPC-related internals. - LockedForIPC = 2, - /// Mapped as part of the device address space.
- DeviceMapped = 4, - /// Uncached memory - Uncached = 8, - - IpcAndDeviceMapped = LockedForIPC | DeviceMapped, -}; - -constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs)); -} - -constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs)); -} - -constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs)); -} - -constexpr MemoryAttribute operator~(MemoryAttribute attribute) { - return static_cast<MemoryAttribute>(~u32(attribute)); -} - -constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) { - return static_cast<u32>(attribute & MemoryAttribute::Mask); -} - -// clang-format off -/// Represents memory states and any relevant flags, as used by the kernel. -/// svcQueryMemory interprets these by masking away all but the first eight -/// bits when storing memory state into a MemoryInfo instance. -enum class MemoryState : u32 { - Mask = 0xFF, - FlagProtect = 1U << 8, - FlagDebug = 1U << 9, - FlagIPC0 = 1U << 10, - FlagIPC3 = 1U << 11, - FlagIPC1 = 1U << 12, - FlagMapped = 1U << 13, - FlagCode = 1U << 14, - FlagAlias = 1U << 15, - FlagModule = 1U << 16, - FlagTransfer = 1U << 17, - FlagQueryPhysicalAddressAllowed = 1U << 18, - FlagSharedDevice = 1U << 19, - FlagSharedDeviceAligned = 1U << 20, - FlagIPCBuffer = 1U << 21, - FlagMemoryPoolAllocated = 1U << 22, - FlagMapProcess = 1U << 23, - FlagUncached = 1U << 24, - FlagCodeMemory = 1U << 25, - - // Wildcard used in range checking to indicate all states. 
- All = 0xFFFFFFFF, - - // Convenience flag sets to reduce repetition - IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1, - - CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer | - FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned | - FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached, - - Unmapped = 0x00, - Io = 0x01 | FlagMapped, - Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, - Code = 0x03 | CodeFlags | FlagMapProcess, - CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory, - Heap = 0x05 | DataFlags | FlagCodeMemory, - Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, - ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess, - ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, - - IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | - IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, - - Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated, - - TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated | - FlagUncached, - - TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated, - - // Used to signify an inaccessible or invalid memory region with memory queries - Inaccessible = 0x10, - - IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - KernelStack = 0x13 | FlagMapped, -}; -// clang-format on - -constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) | u32(rhs)); -} - -constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) & u32(rhs)); -} - -constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) ^ u32(rhs)); -} - -constexpr MemoryState operator~(MemoryState lhs) { - return static_cast<MemoryState>(~u32(lhs)); -} - -constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -constexpr u32 ToSvcMemoryState(MemoryState state) { - return static_cast<u32>(state & MemoryState::Mask); -} - -struct MemoryInfo { - u64 base_address; - u64 size; - u32 state; - u32 attributes; - u32 permission; - u32 ipc_ref_count; - u32 device_ref_count; -}; -static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size."); - -struct PageInfo { - u32 flags; -}; - -/** - * Represents a VMA in an address space. 
A VMA is a contiguous region of virtual addressing space - * with homogeneous attributes across its extents. In this particular implementation each VMA is - * also backed by a single host memory allocation. - */ -struct VirtualMemoryArea { - /// Gets the starting (base) address of this VMA. - VAddr StartAddress() const { - return base; - } - - /// Gets the ending address of this VMA. - VAddr EndAddress() const { - return base + size - 1; - } - - /// Virtual base address of the region. - VAddr base = 0; - /// Size of the region. - u64 size = 0; - - VMAType type = VMAType::Free; - VMAPermission permissions = VMAPermission::None; - MemoryState state = MemoryState::Unmapped; - MemoryAttribute attribute = MemoryAttribute::None; - - // Settings for type = AllocatedMemoryBlock - /// Memory block backing this VMA. - std::shared_ptr<PhysicalMemory> backing_block = nullptr; - /// Offset into backing_block at which the mapping starts. - std::size_t offset = 0; - - // Settings for type = BackingMemory - /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. - u8* backing_memory = nullptr; - - // Settings for type = MMIO - /// Physical address of the register area this VMA maps to. - PAddr paddr = 0; - Common::MemoryHookPointer mmio_handler = nullptr; - - /// Tests if this area can be merged to the right with `next`. - bool CanBeMergedWith(const VirtualMemoryArea& next) const; -}; - -/** - * Manages a process' virtual addressing space. This class maintains a list of allocated and free - * regions in the address space, along with their attributes, and allows kernel clients to - * manipulate it, adjusting the page table to match. - * - * This is similar in idea and purpose to the VM manager present in operating system kernels, with - * the main difference being that it doesn't have to support swapping or memory mapping of files. - * The implementation is also simplified by not having to allocate page frames. See these articles - * about the Linux kernel for an explanation of the concept and implementation: - * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/ - * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/ - */ -class VMManager final { - using VMAMap = std::map<VAddr, VirtualMemoryArea>; - -public: - using VMAHandle = VMAMap::const_iterator; - - explicit VMManager(Core::System& system); - ~VMManager(); - - /// Clears the address space map, re-initializing with a single free area. - void Reset(FileSys::ProgramAddressSpaceType type); - - /// Finds the VMA that contains the given address, or `vma_map.end()`. - VMAHandle FindVMA(VAddr target) const; - - /// Indicates whether or not the given handle is within the VMA map. - bool IsValidHandle(VMAHandle handle) const; - - // TODO(yuriks): Should these functions actually return the handle? - - /** - * Maps part of a ref-counted block of memory at a given address. - * - * @param target The guest address to start the mapping at. - * @param block The block to be mapped. - * @param offset Offset into `block` to map from. - * @param size Size of the mapping. - * @param state MemoryState tag to attach to the VMA. - */ - ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block, - std::size_t offset, u64 size, MemoryState state, - VMAPermission perm = VMAPermission::ReadWrite); - - /** - * Maps an unmanaged host memory pointer at a given address. - * - * @param target The guest address to start the mapping at.
- * @param memory The memory to be mapped. - * @param size Size of the mapping. - * @param state MemoryState tag to attach to the VMA. - */ - ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state); - - /** - * Finds the first free memory region of the given size within - * the user-addressable ASLR memory region. - * - * @param size The size of the desired region in bytes. - * - * @returns If successful, the base address of the free region with - * the given size. - */ - ResultVal<VAddr> FindFreeRegion(u64 size) const; - - /** - * Finds the first free address range that can hold a region of the desired size - * - * @param begin The starting address of the range. - * This is treated as an inclusive beginning address. - * - * @param end The ending address of the range. - * This is treated as an exclusive ending address. - * - * @param size The size of the free region to attempt to locate, - * in bytes. - * - * @returns If successful, the base address of the free region with - * the given size. - * - * @returns If unsuccessful, a result containing an error code. - * - * @pre The starting address must be less than the ending address. - * @pre The size must not exceed the address range itself. - */ - ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const; - - /** - * Maps a memory-mapped IO region at a given address. - * - * @param target The guest address to start the mapping at. - * @param paddr The physical address where the registers are present. - * @param size Size of the mapping. - * @param state MemoryState tag to attach to the VMA. - * @param mmio_handler The handler that will implement read and write for this MMIO region. - */ - ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, - Common::MemoryHookPointer mmio_handler); - - /// Unmaps a range of addresses, splitting VMAs as necessary. - ResultCode UnmapRange(VAddr target, u64 size); - - /// Changes the permissions of the given VMA. - VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms); - - /// Changes the permissions of a range of addresses, splitting VMAs as necessary. - ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); - - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); - - /// Attempts to allocate a heap with the given size. - /// - /// @param size The size of the heap to allocate in bytes. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size that is equal to the size of the current heap, - /// then this function will do nothing and return the current - /// heap's starting address, as there's no need to perform - /// any additional heap allocation work. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size less than the current heap's size, then - /// this function will attempt to shrink the heap. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size larger than the current heap's size, then - /// this function will attempt to extend the size of the heap. - /// - /// @returns A result indicating either success or failure. - /// <p> - /// If successful, this function will return a result - /// containing the starting address to the allocated heap. - /// <p> - /// If unsuccessful, this function will return a result - /// containing an error code. - /// - /// @pre The given size must lie within the allowable heap - /// memory region managed by this VMManager instance. 
- /// Failure to abide by this will result in ERR_OUT_OF_MEMORY - /// being returned as the result. - /// - ResultVal<VAddr> SetHeapSize(u64 size); - - /// Maps memory at a given address. - /// - /// @param target The virtual address to map memory at. - /// @param size The amount of memory to map. - /// - /// @note The destination address must lie within the Map region. - /// - /// @note This function requires that SystemResourceSize be non-zero, - /// however, this is just because if it were not then the - /// resulting page tables could be exploited on hardware by - /// a malicious program. SystemResource usage does not need - /// to be explicitly checked or updated here. - ResultCode MapPhysicalMemory(VAddr target, u64 size); - - /// Unmaps memory at a given address. - /// - /// @param target The virtual address to unmap memory at. - /// @param size The amount of memory to unmap. - /// - /// @note The destination address must lie within the Map region. - /// - /// @note This function requires that SystemResourceSize be non-zero, - /// however, this is just because if it were not then the - /// resulting page tables could be exploited on hardware by - /// a malicious program. SystemResource usage does not need - /// to be explicitly checked or updated here. - ResultCode UnmapPhysicalMemory(VAddr target, u64 size); - - /// Maps a region of memory as code memory. - /// - /// @param dst_address The base address of the region to create the aliasing memory region. - /// @param src_address The base address of the region to be aliased. - /// @param size The total amount of memory to map in bytes. - /// - /// @pre Both memory regions lie within the actual addressable address space. - /// - /// @post After this function finishes execution, assuming success, then the address range - /// [dst_address, dst_address+size) will alias the memory region - /// [src_address, src_address+size). - /// <p> - /// What this also entails is as follows: - /// 1. The aliased region gains the Locked memory attribute. - /// 2. The aliased region becomes read-only. - /// 3. The aliasing region becomes read-only. - /// 4. The aliasing region is created with a memory state of MemoryState::CodeModule. - /// - ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size); - - /// Unmaps a region of memory designated as code module memory. - /// - /// @param dst_address The base address of the memory region aliasing the source memory region. - /// @param src_address The base address of the memory region being aliased. - /// @param size The size of the memory region to unmap in bytes. - /// - /// @pre Both memory ranges lie within the actual addressable address space. - /// - /// @pre The memory region being unmapped has previously been mapped - /// by a call to MapCodeMemory. - /// - /// @post After execution of the function, if successful, the aliasing memory region - /// will be unmapped and the aliased region will have its traits - /// restored to what they were prior to the original mapping call. - /// <p> - /// What this also entails is as follows: - /// 1. The state of the memory region will now indicate a general heap region. - /// 2. All memory attributes for the memory region are cleared. - /// 3. Memory permissions for the region are restored to user read/write. - /// - ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size); - - /// Queries the memory manager for information about the given address.
- /// - /// @param address The address to query the memory manager about for information. - /// - /// @return A MemoryInfo instance containing information about the given address. - /// - MemoryInfo QueryMemory(VAddr address) const; - - /// Sets an attribute across the given address range. - /// - /// @param address The starting address - /// @param size The size of the range to set the attribute on. - /// @param mask The attribute mask - /// @param attribute The attribute to set across the given address range - /// - /// @returns RESULT_SUCCESS if successful - /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set. - /// - ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, - MemoryAttribute attribute); - - /** - * Scans all VMAs and updates the page table range of any that use the given vector as backing - * memory. This should be called after any operation that causes reallocation of the vector. - */ - void RefreshMemoryBlockMappings(const PhysicalMemory* block); - - /// Dumps the address space layout to the log, for debugging - void LogLayout() const; - - /// Gets the total memory usage, used by svcGetInfo - u64 GetTotalPhysicalMemoryAvailable() const; - - /// Gets the address space base address - VAddr GetAddressSpaceBaseAddress() const; - - /// Gets the address space end address - VAddr GetAddressSpaceEndAddress() const; - - /// Gets the total address space address size in bytes - u64 GetAddressSpaceSize() const; - - /// Gets the address space width in bits. - u64 GetAddressSpaceWidth() const; - - /// Determines whether or not the given address range lies within the address space. - bool IsWithinAddressSpace(VAddr address, u64 size) const; - - /// Gets the base address of the ASLR region. - VAddr GetASLRRegionBaseAddress() const; - - /// Gets the end address of the ASLR region. - VAddr GetASLRRegionEndAddress() const; - - /// Gets the size of the ASLR region - u64 GetASLRRegionSize() const; - - /// Determines whether or not the specified address range is within the ASLR region. - bool IsWithinASLRRegion(VAddr address, u64 size) const; - - /// Gets the base address of the code region. - VAddr GetCodeRegionBaseAddress() const; - - /// Gets the end address of the code region. - VAddr GetCodeRegionEndAddress() const; - - /// Gets the total size of the code region in bytes. - u64 GetCodeRegionSize() const; - - /// Determines whether or not the specified range is within the code region. - bool IsWithinCodeRegion(VAddr address, u64 size) const; - - /// Gets the base address of the heap region. - VAddr GetHeapRegionBaseAddress() const; - - /// Gets the end address of the heap region; - VAddr GetHeapRegionEndAddress() const; - - /// Gets the total size of the heap region in bytes. - u64 GetHeapRegionSize() const; - - /// Gets the total size of the current heap in bytes. - /// - /// @note This is the current allocated heap size, not the size - /// of the region it's allowed to exist within. - /// - u64 GetCurrentHeapSize() const; - - /// Determines whether or not the specified range is within the heap region. - bool IsWithinHeapRegion(VAddr address, u64 size) const; - - /// Gets the base address of the map region. - VAddr GetMapRegionBaseAddress() const; - - /// Gets the end address of the map region. - VAddr GetMapRegionEndAddress() const; - - /// Gets the total size of the map region in bytes. - u64 GetMapRegionSize() const; - - /// Determines whether or not the specified range is within the map region. 
- bool IsWithinMapRegion(VAddr address, u64 size) const; - - /// Gets the base address of the stack region. - VAddr GetStackRegionBaseAddress() const; - - /// Gets the end address of the stack region. - VAddr GetStackRegionEndAddress() const; - - /// Gets the total size of the stack region in bytes. - u64 GetStackRegionSize() const; - - /// Determines whether or not the given address range is within the stack region - bool IsWithinStackRegion(VAddr address, u64 size) const; - - /// Gets the base address of the TLS IO region. - VAddr GetTLSIORegionBaseAddress() const; - - /// Gets the end address of the TLS IO region. - VAddr GetTLSIORegionEndAddress() const; - - /// Gets the total size of the TLS IO region in bytes. - u64 GetTLSIORegionSize() const; - - /// Determines if the given address range is within the TLS IO region. - bool IsWithinTLSIORegion(VAddr address, u64 size) const; - - /// Each VMManager has its own page table, which is set as the main one when the owning process - /// is scheduled. - Common::PageTable page_table{Memory::PAGE_BITS}; - - using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>; - - /// Checks if an address range adheres to the specified states provided. - /// - /// @param address The starting address of the address range. - /// @param size The size of the address range. - /// @param state_mask The memory state mask. - /// @param state The state to compare the individual VMA states against, - /// which is done in the form of: (vma.state & state_mask) != state. - /// @param permission_mask The memory permissions mask. - /// @param permissions The permission to compare the individual VMA permissions against, - /// which is done in the form of: - /// (vma.permission & permission_mask) != permission. - /// @param attribute_mask The memory attribute mask. - /// @param attribute The memory attributes to compare the individual VMA attributes - /// against, which is done in the form of: - /// (vma.attributes & attribute_mask) != attribute. - /// @param ignore_mask The memory attributes to ignore during the check. - /// - /// @returns If successful, returns a tuple containing the memory attributes - /// (with ignored bits specified by ignore_mask unset), memory permissions, and - /// memory state across the memory range. - /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE. - /// - CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state, - VMAPermission permission_mask, VMAPermission permissions, - MemoryAttribute attribute_mask, MemoryAttribute attribute, - MemoryAttribute ignore_mask) const; - -private: - using VMAIter = VMAMap::iterator; - - /// Converts a VMAHandle to a mutable VMAIter. - VMAIter StripIterConstness(const VMAHandle& iter); - - /// Unmaps the given VMA. - VMAIter Unmap(VMAIter vma); - - /** - * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing - * the appropriate error checking. - */ - ResultVal<VMAIter> CarveVMA(VAddr base, u64 size); - - /** - * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each - * end of the range. - */ - ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size); - - /** - * Splits a VMA in two, at the specified offset. - * @returns the right side of the split, with the original iterator becoming the left side. - */ - VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma); - - /** - * Checks for and merges the specified VMA with adjacent ones if possible. 
- * @returns the merged VMA or the original if no merging was possible. - */ - VMAIter MergeAdjacent(VMAIter vma); - - /** - * Merges two adjacent VMAs. - */ - void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right); - - /// Updates the pages corresponding to this VMA so they match the VMA's attributes. - void UpdatePageTableForVMA(const VirtualMemoryArea& vma); - - /// Initializes memory region ranges to adhere to a given address space type. - void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type); - - /// Clears the underlying map and page table. - void Clear(); - - /// Clears out the VMA map, unmapping any previously mapped ranges. - void ClearVMAMap(); - - /// Clears out the page table - void ClearPageTable(); - - /// Gets the amount of memory currently mapped (state != Unmapped) in a range. - ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const; - - /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range. - ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address, - std::size_t size) const; - - /** - * A map covering the entirety of the managed address space, keyed by the `base` field of each - * VMA. It must always be modified by splitting or merging VMAs, so that the invariant - * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be - * merged when possible so that no two similar and adjacent regions exist that have not been - * merged. - */ - VMAMap vma_map; - - u32 address_space_width = 0; - VAddr address_space_base = 0; - VAddr address_space_end = 0; - - VAddr aslr_region_base = 0; - VAddr aslr_region_end = 0; - - VAddr code_region_base = 0; - VAddr code_region_end = 0; - - VAddr heap_region_base = 0; - VAddr heap_region_end = 0; - - VAddr map_region_base = 0; - VAddr map_region_end = 0; - - VAddr stack_region_base = 0; - VAddr stack_region_end = 0; - - VAddr tls_io_region_base = 0; - VAddr tls_io_region_end = 0; - - // Memory used to back the allocations in the regular heap. A single vector is used to cover - // the entire virtual address space extents that bound the allocations, including any holes. - // This makes deallocation and reallocation of holes fast and keeps process memory contiguous - // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. - std::shared_ptr<PhysicalMemory> heap_memory; - - // The end of the currently allocated heap. This is not an inclusive - // end of the range. This is essentially 'base_address + current_size'. - VAddr heap_end = 0; - - // The current amount of memory mapped via MapPhysicalMemory. - // This is used here (and in Nintendo's kernel) only for debugging, and does not impact - // any behavior. - u64 physical_memory_mapped = 0; - - Core::System& system; -}; -} // namespace Kernel diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index 557608e76..3ece2cf3c 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -903,7 +903,7 @@ private: void PopOutData(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_AM, "called"); - const auto storage = applet->GetBroker().PopNormalDataToGame(); + auto storage = applet->GetBroker().PopNormalDataToGame(); if (storage == nullptr) { LOG_ERROR(Service_AM, "storage is a nullptr. 
There is no data in the current normal channel"); @@ -934,7 +934,7 @@ private: void PopInteractiveOutData(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_AM, "called"); - const auto storage = applet->GetBroker().PopInteractiveDataToGame(); + auto storage = applet->GetBroker().PopInteractiveDataToGame(); if (storage == nullptr) { LOG_ERROR(Service_AM, "storage is a nullptr. There is no data in the current interactive channel"); diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 4fb2cbc4b..106e89743 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -210,7 +210,7 @@ private: /// This is the event handle used to check if the audio buffer was released Kernel::EventPair buffer_event; - Memory::Memory& main_memory; + Core::Memory::Memory& main_memory; }; AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} { diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 175cabf45..d8359abaa 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -92,11 +92,16 @@ private: } void RequestUpdateImpl(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_Audio, "(STUBBED) called"); + LOG_DEBUG(Service_Audio, "(STUBBED) called"); + + auto result = renderer->UpdateAudioRenderer(ctx.ReadBuffer()); + + if (result.Succeeded()) { + ctx.WriteBuffer(result.Unwrap()); + } - ctx.WriteBuffer(renderer->UpdateAudioRenderer(ctx.ReadBuffer())); IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(RESULT_SUCCESS); + rb.Push(result.Code()); } void Start(Kernel::HLERequestContext& ctx) { @@ -252,8 +257,6 @@ private: } void GetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; - const auto device_name_buffer = ctx.ReadBuffer(); const std::string name = Common::StringFromBuffer(device_name_buffer); diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp index 1f37bc4d6..f8e9df4b1 100644 --- a/src/core/hle/service/es/es.cpp +++ b/src/core/hle/service/es/es.cpp @@ -77,7 +77,6 @@ private: } void ImportTicket(Kernel::HLERequestContext& ctx) { - IPC::RequestParser rp{ctx}; const auto ticket = ctx.ReadBuffer(); const auto cert = ctx.ReadBuffer(1); diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index c1e32b28c..c55d900e2 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -107,6 +107,7 @@ void Controller_NPad::InitNewlyAddedControler(std::size_t controller_idx) { switch (controller_type) { case NPadControllerType::None: UNREACHABLE(); + break; case NPadControllerType::Handheld: controller.joy_styles.handheld.Assign(1); controller.device_type.handheld.Assign(1); @@ -363,6 +364,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* switch (controller_type) { case NPadControllerType::None: UNREACHABLE(); + break; case NPadControllerType::Handheld: handheld_entry.connection_status.raw = 0; handheld_entry.connection_status.IsWired.Assign(1); @@ -500,7 +502,7 @@ void Controller_NPad::SetNpadMode(u32 npad_id, NPadAssignments assignment_mode) void Controller_NPad::VibrateController(const std::vector<u32>& controller_ids, const std::vector<Vibration>& vibrations) { - LOG_WARNING(Service_HID, "(STUBBED) called"); + LOG_DEBUG(Service_HID, "(STUBBED) called"); if (!can_controllers_vibrate) { return; diff --git 
a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index d6ed5f304..d6031a987 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -14,6 +14,7 @@ #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/writable_event.h" @@ -53,9 +54,7 @@ IAppletResource::IAppletResource(Core::System& system) RegisterHandlers(functions); auto& kernel = system.Kernel(); - shared_mem = Kernel::SharedMemory::Create( - kernel, nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "HID:SharedMemory"); + shared_mem = SharedFrom(&kernel.GetHidSharedMem()); MakeController<Controller_DebugPad>(HidController::DebugPad); MakeController<Controller_Touchscreen>(HidController::Touchscreen); diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp index 5e79e2c1a..36ed6f7da 100644 --- a/src/core/hle/service/hid/irs.cpp +++ b/src/core/hle/service/hid/irs.cpp @@ -6,6 +6,7 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/service/hid/irs.h" @@ -38,9 +39,8 @@ IRS::IRS(Core::System& system) : ServiceFramework{"irs"}, system(system) { RegisterHandlers(functions); auto& kernel = system.Kernel(); - shared_mem = Kernel::SharedMemory::Create( - kernel, nullptr, 0x8000, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "IRS:SharedMemory"); + + shared_mem = SharedFrom(&kernel.GetIrsSharedMem()); } void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index 647943020..0cde7a557 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -8,14 +8,21 @@ #include "common/alignment.h" #include "common/hex_util.h" +#include "common/scope_exit.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/system_control.h" #include "core/hle/kernel/process.h" #include "core/hle/service/ldr/ldr.h" #include "core/hle/service/service.h" #include "core/loader/nro.h" +#include "core/memory.h" namespace Service::LDR { +constexpr ResultCode ERROR_INSUFFICIENT_ADDRESS_SPACE{ErrorModule::RO, 2}; + constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51}; constexpr ResultCode ERROR_INVALID_NRO{ErrorModule::Loader, 52}; constexpr ResultCode ERROR_INVALID_NRR{ErrorModule::Loader, 53}; @@ -29,7 +36,61 @@ constexpr ResultCode ERROR_INVALID_NRO_ADDRESS{ErrorModule::Loader, 84}; constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85}; constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87}; -constexpr u64 MAXIMUM_LOADED_RO = 0x40; +constexpr std::size_t MAXIMUM_LOADED_RO{0x40}; +constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200}; + +struct NRRHeader { + u32_le magic; + INSERT_PADDING_BYTES(12); + u64_le title_id_mask; + u64_le title_id_pattern; + INSERT_PADDING_BYTES(16); + std::array<u8, 0x100> modulus; + std::array<u8, 0x100> signature_1; + std::array<u8, 0x100> signature_2; + u64_le title_id; + u32_le size; + INSERT_PADDING_BYTES(4); + u32_le hash_offset; + 
u32_le hash_count; + INSERT_PADDING_BYTES(8); +}; +static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); + +struct NROHeader { + INSERT_PADDING_WORDS(1); + u32_le mod_offset; + INSERT_PADDING_WORDS(2); + u32_le magic; + u32_le version; + u32_le nro_size; + u32_le flags; + u32_le text_offset; + u32_le text_size; + u32_le ro_offset; + u32_le ro_size; + u32_le rw_offset; + u32_le rw_size; + u32_le bss_size; + INSERT_PADDING_WORDS(1); + std::array<u8, 0x20> build_id; + INSERT_PADDING_BYTES(0x20); +}; +static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size."); + +using SHA256Hash = std::array<u8, 0x20>; + +struct NROInfo { + SHA256Hash hash{}; + VAddr nro_address{}; + std::size_t nro_size{}; + VAddr bss_address{}; + std::size_t bss_size{}; + std::size_t text_size{}; + std::size_t ro_size{}; + std::size_t data_size{}; + VAddr src_addr{}; +}; class DebugMonitor final : public ServiceFramework<DebugMonitor> { public: @@ -84,7 +145,7 @@ public: {0, &RelocatableObject::LoadNro, "LoadNro"}, {1, &RelocatableObject::UnloadNro, "UnloadNro"}, {2, &RelocatableObject::LoadNrr, "LoadNrr"}, - {3, &RelocatableObject::UnloadNrr, "UnloadNrr"}, + {3, nullptr, "UnloadNrr"}, {4, &RelocatableObject::Initialize, "Initialize"}, {10, nullptr, "LoadNrrEx"}, }; @@ -190,46 +251,125 @@ public: rb.Push(RESULT_SUCCESS); } - void UnloadNrr(Kernel::HLERequestContext& ctx) { - if (!initialized) { - LOG_ERROR(Service_LDR, "LDR:RO not initialized before use!"); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_NOT_INITIALIZED); - return; + bool ValidateRegionForMap(Kernel::Memory::PageTable& page_table, VAddr start, + std::size_t size) const { + constexpr std::size_t padding_size{4 * Kernel::Memory::PageSize}; + const auto start_info{page_table.QueryInfo(start - 1)}; + + if (start_info.state != Kernel::Memory::MemoryState::Free) { + return {}; } - struct Parameters { - u64_le process_id; - u64_le nrr_address; - }; + if (start_info.GetAddress() > (start - padding_size)) { + return {}; + } - IPC::RequestParser rp{ctx}; - const auto [process_id, nrr_address] = rp.PopRaw<Parameters>(); + const auto end_info{page_table.QueryInfo(start + size)}; - LOG_DEBUG(Service_LDR, "called with process_id={:016X}, nrr_addr={:016X}", process_id, - nrr_address); + if (end_info.state != Kernel::Memory::MemoryState::Free) { + return {}; + } - if (!Common::Is4KBAligned(nrr_address)) { - LOG_ERROR(Service_LDR, "NRR Address has invalid alignment (actual {:016X})!", - nrr_address); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_INVALID_ALIGNMENT); - return; + return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); + } + + VAddr GetRandomMapRegion(const Kernel::Memory::PageTable& page_table, std::size_t size) const { + VAddr addr{}; + const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >> + Kernel::Memory::PageBits}; + do { + addr = page_table.GetAliasCodeRegionStart() + + (Kernel::Memory::SystemControl::GenerateRandomRange(0, end_pages) + << Kernel::Memory::PageBits); + } while (!page_table.IsInsideAddressSpace(addr, size) || + page_table.IsInsideHeapRegion(addr, size) || + page_table.IsInsideAliasRegion(addr, size)); + return addr; + } + + ResultVal<VAddr> MapProcessCodeMemory(Kernel::Process* process, VAddr baseAddress, + u64 size) const { + for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) { + auto& page_table{process->PageTable()}; + const VAddr addr{GetRandomMapRegion(page_table, size)}; + const ResultCode result{page_table.MapProcessCodeMemory(addr, 
baseAddress, size)}; + + if (result == Kernel::ERR_INVALID_ADDRESS_STATE) { + continue; + } + + CASCADE_CODE(result); + + if (ValidateRegionForMap(page_table, addr, size)) { + return MakeResult<VAddr>(addr); + } } - const auto iter = nrr.find(nrr_address); - if (iter == nrr.end()) { - LOG_ERROR(Service_LDR, - "Attempting to unload NRR which has not been loaded! (addr={:016X})", - nrr_address); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_INVALID_NRR_ADDRESS); - return; + return ERROR_INSUFFICIENT_ADDRESS_SPACE; + } + + ResultVal<VAddr> MapNro(Kernel::Process* process, VAddr nro_addr, std::size_t nro_size, + VAddr bss_addr, std::size_t bss_size, std::size_t size) const { + + for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) { + auto& page_table{process->PageTable()}; + VAddr addr{}; + + CASCADE_RESULT(addr, MapProcessCodeMemory(process, nro_addr, nro_size)); + + if (bss_size) { + auto block_guard = detail::ScopeExit([&] { + page_table.UnmapProcessCodeMemory(addr + nro_size, bss_addr, bss_size); + page_table.UnmapProcessCodeMemory(addr, nro_addr, nro_size); + }); + + const ResultCode result{ + page_table.MapProcessCodeMemory(addr + nro_size, bss_addr, bss_size)}; + + if (result == Kernel::ERR_INVALID_ADDRESS_STATE) { + continue; + } + + if (result.IsError()) { + return result; + } + + block_guard.Cancel(); + } + + if (ValidateRegionForMap(page_table, addr, size)) { + return MakeResult<VAddr>(addr); + } } - nrr.erase(iter); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(RESULT_SUCCESS); + return ERROR_INSUFFICIENT_ADDRESS_SPACE; + } + + ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr, + VAddr start) const { + const VAddr text_start{start + nro_header.text_offset}; + const VAddr ro_start{start + nro_header.ro_offset}; + const VAddr data_start{start + nro_header.rw_offset}; + const VAddr bss_start{data_start + nro_header.rw_size}; + const VAddr bss_end_addr{ + Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)}; + + auto CopyCode{[&](VAddr src_addr, VAddr dst_addr, u64 size) { + std::vector<u8> source_data(size); + system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size()); + system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size()); + }}; + CopyCode(nro_addr + nro_header.text_offset, text_start, nro_header.text_size); + CopyCode(nro_addr + nro_header.ro_offset, ro_start, nro_header.ro_size); + CopyCode(nro_addr + nro_header.rw_offset, data_start, nro_header.rw_size); + + CASCADE_CODE(process->PageTable().SetCodeMemoryPermission( + text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute)); + CASCADE_CODE(process->PageTable().SetCodeMemoryPermission( + ro_start, data_start - ro_start, Kernel::Memory::MemoryPermission::Read)); + + return process->PageTable().SetCodeMemoryPermission( + data_start, bss_end_addr - data_start, Kernel::Memory::MemoryPermission::ReadAndWrite); } void LoadNro(Kernel::HLERequestContext& ctx) { @@ -317,9 +457,9 @@ public: return; } - NROHeader header; + // Load and validate the NRO header + NROHeader header{}; std::memcpy(&header, nro_data.data(), sizeof(NROHeader)); - if (!IsValidNRO(header, nro_size, bss_size)) { LOG_ERROR(Service_LDR, "NRO was invalid!"); IPC::ResponseBuilder rb{ctx, 2}; @@ -327,62 +467,48 @@ public: return; } - // Load NRO as new executable module - auto* process = system.CurrentProcess(); - auto& vm_manager = process->VMManager(); - auto map_address = vm_manager.FindFreeRegion(nro_size + bss_size); - - if 
(!map_address.Succeeded() || - *map_address + nro_size + bss_size > vm_manager.GetAddressSpaceEndAddress()) { - - LOG_ERROR(Service_LDR, - "General error while allocation memory or no available memory to allocate!"); + // Map memory for the NRO + const auto map_result{MapNro(system.CurrentProcess(), nro_address, nro_size, bss_address, + bss_size, nro_size + bss_size)}; + if (map_result.Failed()) { IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_INVALID_MEMORY_STATE); - return; + rb.Push(map_result.Code()); + return; } - // Mark text and read-only region as ModuleCode - ASSERT(vm_manager - .MirrorMemory(*map_address, nro_address, header.text_size + header.ro_size, - Kernel::MemoryState::ModuleCode) - .IsSuccess()); - // Mark read/write region as ModuleCodeData, which is necessary if this region is used for - // TransferMemory (e.g. Final Fantasy VIII Remastered does this) - ASSERT(vm_manager - .MirrorMemory(*map_address + header.rw_offset, nro_address + header.rw_offset, - header.rw_size, Kernel::MemoryState::ModuleCodeData) - .IsSuccess()); - // Revoke permissions from the old memory region - ASSERT(vm_manager.ReprotectRange(nro_address, nro_size, Kernel::VMAPermission::None) - .IsSuccess()); - - if (bss_size > 0) { - // Mark BSS region as ModuleCodeData, which is necessary if this region is used for - // TransferMemory (e.g. Final Fantasy VIII Remastered does this) - ASSERT(vm_manager - .MirrorMemory(*map_address + nro_size, bss_address, bss_size, - Kernel::MemoryState::ModuleCodeData) - .IsSuccess()); - ASSERT(vm_manager.ReprotectRange(bss_address, bss_size, Kernel::VMAPermission::None) - .IsSuccess()) + // Load the NRO into the mapped memory + if (const auto result{LoadNro(system.CurrentProcess(), header, nro_address, *map_result)}; + result.IsError()) { IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(result); + return; } - vm_manager.ReprotectRange(*map_address, header.text_size, - Kernel::VMAPermission::ReadExecute); - vm_manager.ReprotectRange(*map_address + header.ro_offset, header.ro_size, - Kernel::VMAPermission::Read); - vm_manager.ReprotectRange(*map_address + header.rw_offset, header.rw_size, - Kernel::VMAPermission::ReadWrite); + // Track the loaded NRO + nro.insert_or_assign(*map_result, NROInfo{hash, *map_result, nro_size, bss_address, + bss_size, header.text_size, header.ro_size, + header.rw_size, nro_address}); + // Invalidate JIT caches for the newly mapped process code system.InvalidateCpuInstructionCaches(); - nro.insert_or_assign(*map_address, - NROInfo{hash, nro_address, nro_size, bss_address, bss_size}); - IPC::ResponseBuilder rb{ctx, 4}; rb.Push(RESULT_SUCCESS); - rb.Push(*map_address); + rb.Push(*map_result); + } + + ResultCode UnmapNro(const NROInfo& info) { + // Each region must be unmapped separately to validate memory state + auto& page_table{system.CurrentProcess()->PageTable()}; + CASCADE_CODE(page_table.UnmapProcessCodeMemory(info.nro_address + info.text_size + + info.ro_size + info.data_size, + info.bss_address, info.bss_size)); + CASCADE_CODE(page_table.UnmapProcessCodeMemory( + info.nro_address + info.text_size + info.ro_size, + info.src_addr + info.text_size + info.ro_size, info.data_size)); + CASCADE_CODE(page_table.UnmapProcessCodeMemory( + info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size)); + CASCADE_CODE( + page_table.UnmapProcessCodeMemory(info.nro_address, info.src_addr, info.text_size)); + return RESULT_SUCCESS; } void UnloadNro(Kernel::HLERequestContext& ctx) { @@ -422,30 +548,15 @@ public: return; } - auto& vm_manager =
system.CurrentProcess()->VMManager(); - const auto& nro_info = iter->second; - - // Unmap the mirrored memory - ASSERT( - vm_manager.UnmapRange(nro_address, nro_info.nro_size + nro_info.bss_size).IsSuccess()); - - // Reprotect the source memory - ASSERT(vm_manager - .ReprotectRange(nro_info.nro_address, nro_info.nro_size, - Kernel::VMAPermission::ReadWrite) - .IsSuccess()); - if (nro_info.bss_size > 0) { - ASSERT(vm_manager - .ReprotectRange(nro_info.bss_address, nro_info.bss_size, - Kernel::VMAPermission::ReadWrite) - .IsSuccess()); - } + const auto result{UnmapNro(iter->second)}; system.InvalidateCpuInstructionCaches(); nro.erase(iter); + IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(RESULT_SUCCESS); + + rb.Push(result); } void Initialize(Kernel::HLERequestContext& ctx) { @@ -458,56 +569,7 @@ public: } private: - using SHA256Hash = std::array<u8, 0x20>; - - struct NROHeader { - INSERT_PADDING_WORDS(1); - u32_le mod_offset; - INSERT_PADDING_WORDS(2); - u32_le magic; - u32_le version; - u32_le nro_size; - u32_le flags; - u32_le text_offset; - u32_le text_size; - u32_le ro_offset; - u32_le ro_size; - u32_le rw_offset; - u32_le rw_size; - u32_le bss_size; - INSERT_PADDING_WORDS(1); - std::array<u8, 0x20> build_id; - INSERT_PADDING_BYTES(0x20); - }; - static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size."); - - struct NRRHeader { - u32_le magic; - INSERT_PADDING_BYTES(12); - u64_le title_id_mask; - u64_le title_id_pattern; - INSERT_PADDING_BYTES(16); - std::array<u8, 0x100> modulus; - std::array<u8, 0x100> signature_1; - std::array<u8, 0x100> signature_2; - u64_le title_id; - u32_le size; - INSERT_PADDING_BYTES(4); - u32_le hash_offset; - u32_le hash_count; - INSERT_PADDING_BYTES(8); - }; - static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); - - struct NROInfo { - SHA256Hash hash; - VAddr nro_address; - u64 nro_size; - VAddr bss_address; - u64 bss_size; - }; - - bool initialized = false; + bool initialized{}; std::map<VAddr, NROInfo> nro; std::map<VAddr, std::vector<SHA256Hash>> nrr; diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp index 346c8f899..dec96b771 100644 --- a/src/core/hle/service/lm/lm.cpp +++ b/src/core/hle/service/lm/lm.cpp @@ -17,7 +17,7 @@ namespace Service::LM { class ILogger final : public ServiceFramework<ILogger> { public: - explicit ILogger(Manager& manager_, Memory::Memory& memory_) + explicit ILogger(Manager& manager_, Core::Memory::Memory& memory_) : ServiceFramework("ILogger"), manager{manager_}, memory{memory_} { static const FunctionInfo functions[] = { {0, &ILogger::Log, "Log"}, @@ -75,12 +75,12 @@ private: } Manager& manager; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; class LM final : public ServiceFramework<LM> { public: - explicit LM(Manager& manager_, Memory::Memory& memory_) + explicit LM(Manager& manager_, Core::Memory::Memory& memory_) : ServiceFramework{"lm"}, manager{manager_}, memory{memory_} { // clang-format off static const FunctionInfo functions[] = { @@ -101,7 +101,7 @@ private: } Manager& manager; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; void InstallInterfaces(Core::System& system) { diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp index 8da4e52c5..ab1746d28 100644 --- a/src/core/hle/service/ns/pl_u.cpp +++ b/src/core/hle/service/ns/pl_u.cpp @@ -19,6 +19,7 @@ #include "core/file_sys/romfs.h" #include "core/file_sys/system_archive/system_archive.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/kernel.h" 
#include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/service/filesystem/filesystem.h" @@ -265,16 +266,13 @@ void PL_U::GetSharedMemoryAddressOffset(Kernel::HLERequestContext& ctx) { void PL_U::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) { // Map backing memory for the font data LOG_DEBUG(Service_NS, "called"); - system.CurrentProcess()->VMManager().MapMemoryBlock(SHARED_FONT_MEM_VADDR, impl->shared_font, 0, - SHARED_FONT_MEM_SIZE, - Kernel::MemoryState::Shared); // Create shared font memory object auto& kernel = system.Kernel(); - impl->shared_font_mem = Kernel::SharedMemory::Create( - kernel, system.CurrentProcess(), SHARED_FONT_MEM_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, SHARED_FONT_MEM_VADDR, Kernel::MemoryRegion::BASE, - "PL_U:shared_font_mem"); + impl->shared_font_mem = SharedFrom(&kernel.GetFontSharedMem()); + + std::memcpy(impl->shared_font_mem->GetPointer(), impl->shared_font->data(), + impl->shared_font->size()); IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(RESULT_SUCCESS); diff --git a/src/core/hle/service/time/standard_network_system_clock_core.h b/src/core/hle/service/time/standard_network_system_clock_core.h index 3f505c37c..c993bdf79 100644 --- a/src/core/hle/service/time/standard_network_system_clock_core.h +++ b/src/core/hle/service/time/standard_network_system_clock_core.h @@ -23,7 +23,7 @@ public: standard_network_clock_sufficient_accuracy = value; } - bool IsStandardNetworkSystemClockAccuracySufficient(Core::System& system) { + bool IsStandardNetworkSystemClockAccuracySufficient(Core::System& system) const { SystemClockContext context{}; if (GetClockContext(system, context) != RESULT_SUCCESS) { return {}; diff --git a/src/core/hle/service/time/steady_clock_core.h b/src/core/hle/service/time/steady_clock_core.h index 84af3d105..d80a2385f 100644 --- a/src/core/hle/service/time/steady_clock_core.h +++ b/src/core/hle/service/time/steady_clock_core.h @@ -16,6 +16,7 @@ namespace Service::Time::Clock { class SteadyClockCore { public: SteadyClockCore() = default; + virtual ~SteadyClockCore() = default; const Common::UUID& GetClockSourceId() const { return clock_source_id; diff --git a/src/core/hle/service/time/system_clock_context_update_callback.h b/src/core/hle/service/time/system_clock_context_update_callback.h index 6260de6c3..2b0fa7e75 100644 --- a/src/core/hle/service/time/system_clock_context_update_callback.h +++ b/src/core/hle/service/time/system_clock_context_update_callback.h @@ -20,7 +20,7 @@ namespace Service::Time::Clock { class SystemClockContextUpdateCallback { public: SystemClockContextUpdateCallback(); - ~SystemClockContextUpdateCallback(); + virtual ~SystemClockContextUpdateCallback(); bool NeedUpdate(const SystemClockContext& value) const; diff --git a/src/core/hle/service/time/system_clock_core.cpp b/src/core/hle/service/time/system_clock_core.cpp index 1a3ab8cfa..d31d4e2ca 100644 --- a/src/core/hle/service/time/system_clock_core.cpp +++ b/src/core/hle/service/time/system_clock_core.cpp @@ -9,7 +9,7 @@ namespace Service::Time::Clock { SystemClockCore::SystemClockCore(SteadyClockCore& steady_clock_core) - : steady_clock_core{steady_clock_core}, is_initialized{} { + : steady_clock_core{steady_clock_core} { context.steady_time_point.clock_source_id = steady_clock_core.GetClockSourceId(); } diff --git a/src/core/hle/service/time/system_clock_core.h b/src/core/hle/service/time/system_clock_core.h index 54407a6c5..608dd3b2e 100644 --- 
a/src/core/hle/service/time/system_clock_core.h +++ b/src/core/hle/service/time/system_clock_core.h @@ -22,7 +22,7 @@ class SystemClockContextUpdateCallback; class SystemClockCore { public: explicit SystemClockCore(SteadyClockCore& steady_clock_core); - ~SystemClockCore(); + virtual ~SystemClockCore(); SteadyClockCore& GetSteadyClockCore() const { return steady_clock_core; diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp index fdaef233f..999ec1e51 100644 --- a/src/core/hle/service/time/time_sharedmemory.cpp +++ b/src/core/hle/service/time/time_sharedmemory.cpp @@ -6,6 +6,7 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/service/time/clock_types.h" #include "core/hle/service/time/steady_clock_core.h" #include "core/hle/service/time/time_sharedmemory.h" @@ -15,9 +16,7 @@ namespace Service::Time { static constexpr std::size_t SHARED_MEMORY_SIZE{0x1000}; SharedMemory::SharedMemory(Core::System& system) : system(system) { - shared_memory_holder = Kernel::SharedMemory::Create( - system.Kernel(), nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "Time:SharedMemory"); + shared_memory_holder = SharedFrom(&system.Kernel().GetTimeSharedMem()); std::memset(shared_memory_holder->GetPointer(), 0, SHARED_MEMORY_SIZE); } diff --git a/src/core/hle/service/time/time_zone_manager.cpp b/src/core/hle/service/time/time_zone_manager.cpp index c8159bcd5..69152d0ac 100644 --- a/src/core/hle/service/time/time_zone_manager.cpp +++ b/src/core/hle/service/time/time_zone_manager.cpp @@ -518,8 +518,8 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi constexpr s32 time_zone_max_leaps{50}; constexpr s32 time_zone_max_chars{50}; if (!(0 <= header.leap_count && header.leap_count < time_zone_max_leaps && - 0 < header.type_count && header.type_count < time_zone_rule.ttis.size() && - 0 <= header.time_count && header.time_count < time_zone_rule.ats.size() && + 0 < header.type_count && header.type_count < s32(time_zone_rule.ttis.size()) && + 0 <= header.time_count && header.time_count < s32(time_zone_rule.ats.size()) && 0 <= header.char_count && header.char_count < time_zone_max_chars && (header.ttis_std_count == header.type_count || header.ttis_std_count == 0) && (header.ttis_gmt_count == header.type_count || header.ttis_gmt_count == 0))) { diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp index 53559e8b1..134e83412 100644 --- a/src/core/loader/deconstructed_rom_directory.cpp +++ b/src/core/loader/deconstructed_rom_directory.cpp @@ -14,6 +14,7 @@ #include "core/file_sys/romfs_factory.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/loader/deconstructed_rom_directory.h" @@ -129,27 +130,47 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect } metadata.Print(); - if (process.LoadFromMetadata(metadata).IsError()) { - return {ResultStatus::ErrorUnableToParseKernelMetadata, {}}; + const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3", + "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"}; + + // Use the NSO module loader to figure out the code layout + 
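Several time-service base classes above (SteadyClockCore, SystemClockContextUpdateCallback, SystemClockCore) gain virtual destructors. Deleting a derived clock through a base-class pointer is undefined behavior without one; a minimal self-contained illustration, with stand-in names rather than the real clock types:

    #include <memory>

    struct ClockBase {
        virtual ~ClockBase() = default; // without 'virtual', the deletion below is UB
    };

    struct DerivedClock : ClockBase {
        ~DerivedClock() override = default; // now guaranteed to run
    };

    int main() {
        std::unique_ptr<ClockBase> clock = std::make_unique<DerivedClock>();
    } // runs ~DerivedClock, then ~ClockBase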
std::size_t code_size{}; + for (const auto& module : static_modules) { + const FileSys::VirtualFile module_file{dir->GetFile(module)}; + if (!module_file) { + continue; + } + + const bool should_pass_arguments{std::strcmp(module, "rtld") == 0}; + const auto tentative_next_load_addr{AppLoader_NSO::LoadModule( + process, *module_file, code_size, should_pass_arguments, false)}; + if (!tentative_next_load_addr) { + return {ResultStatus::ErrorLoadingNSO, {}}; + } + + code_size = *tentative_next_load_addr; } - const FileSys::PatchManager pm(metadata.GetTitleID()); + // Setup the process code layout + if (process.LoadFromMetadata(metadata, code_size).IsError()) { + return {ResultStatus::ErrorUnableToParseKernelMetadata, {}}; + } // Load NSO modules modules.clear(); - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); - VAddr next_load_addr = base_address; - for (const auto& module : {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3", - "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"}) { - const FileSys::VirtualFile module_file = dir->GetFile(module); - if (module_file == nullptr) { + const VAddr base_address{process.PageTable().GetCodeRegionStart()}; + VAddr next_load_addr{base_address}; + const FileSys::PatchManager pm{metadata.GetTitleID()}; + for (const auto& module : static_modules) { + const FileSys::VirtualFile module_file{dir->GetFile(module)}; + if (!module_file) { continue; } - const VAddr load_addr = next_load_addr; - const bool should_pass_arguments = std::strcmp(module, "rtld") == 0; - const auto tentative_next_load_addr = - AppLoader_NSO::LoadModule(process, *module_file, load_addr, should_pass_arguments, pm); + const VAddr load_addr{next_load_addr}; + const bool should_pass_arguments{std::strcmp(module, "rtld") == 0}; + const auto tentative_next_load_addr{AppLoader_NSO::LoadModule( + process, *module_file, load_addr, should_pass_arguments, true, pm)}; if (!tentative_next_load_addr) { return {ResultStatus::ErrorLoadingNSO, {}}; } diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 8908e5328..8f7615115 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp @@ -10,8 +10,8 @@ #include "common/file_util.h" #include "common/logging/log.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/elf.h" #include "core/memory.h" @@ -393,15 +393,20 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) { return {ResultStatus::ErrorIncorrectELFFileSize, {}}; } - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); ElfReader elf_reader(&buffer[0]); Kernel::CodeSet codeset = elf_reader.LoadInto(base_address); const VAddr entry_point = codeset.entrypoint; + // Setup the process code layout + if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), buffer.size()).IsError()) { + return {ResultStatus::ErrorNotInitialized, {}}; + } + process.LoadModule(std::move(codeset), entry_point); is_loaded = true; - return {ResultStatus::Success, LoadParameters{48, Memory::DEFAULT_STACK_SIZE}}; + return {ResultStatus::Success, LoadParameters{48, Core::Memory::DEFAULT_STACK_SIZE}}; } } // namespace Loader diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp index 092103abe..40fa03ad1 100644 --- a/src/core/loader/kip.cpp +++ b/src/core/loader/kip.cpp @@ -7,14 +7,16 @@ #include 
"core/file_sys/program_metadata.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/loader/kip.h" +#include "core/memory.h" namespace Loader { namespace { constexpr u32 PageAlignSize(u32 size) { - return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } } // Anonymous namespace @@ -68,7 +70,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) { kip->GetMainThreadCpuCore(), kip->GetMainThreadStackSize(), kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, kip->GetKernelCapabilities()); - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); Kernel::CodeSet codeset; Kernel::PhysicalMemory program_image; diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 175898b91..906544bc9 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp @@ -16,8 +16,8 @@ #include "core/file_sys/vfs_offset.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/loader/nro.h" #include "core/loader/nso.h" @@ -127,11 +127,11 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& file) { } static constexpr u32 PageAlignSize(u32 size) { - return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, - const std::string& name, VAddr load_base) { + const std::string& name) { if (data.size() < sizeof(NroHeader)) { return {}; } @@ -187,19 +187,25 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, codeset.DataSegment().size += bss_size; program_image.resize(static_cast<u32>(program_image.size()) + bss_size); + // Setup the process code layout + if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size()) + .IsError()) { + return false; + } + // Load codeset for current process codeset.memory = std::move(program_image); - process.LoadModule(std::move(codeset), load_base); + process.LoadModule(std::move(codeset), process.PageTable().GetCodeRegionStart()); // Register module with GDBStub - GDBStub::RegisterModule(name, load_base, load_base); + GDBStub::RegisterModule(name, process.PageTable().GetCodeRegionStart(), + process.PageTable().GetCodeRegionEnd()); return true; } -bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file, - VAddr load_base) { - return LoadNroImpl(process, file.ReadAllBytes(), file.GetName(), load_base); +bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file) { + return LoadNroImpl(process, file.ReadAllBytes(), file.GetName()); } AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) { @@ -207,10 +213,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) { return {ResultStatus::ErrorAlreadyLoaded, {}}; } - // Load NRO - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); - - if (!LoadNro(process, *file, base_address)) { + if (!LoadNro(process, *file)) { return {ResultStatus::ErrorLoadingNRO, {}}; } @@ -221,7 +224,7 @@ AppLoader_NRO::LoadResult 
AppLoader_NRO::Load(Kernel::Process& process) { is_loaded = true; return {ResultStatus::Success, - LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; + LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; } ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) { diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h index 71811bc29..4593d48fb 100644 --- a/src/core/loader/nro.h +++ b/src/core/loader/nro.h @@ -47,7 +47,7 @@ public: bool IsRomFSUpdatable() const override; private: - bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base); + bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file); std::vector<u8> icon_data; std::unique_ptr<FileSys::NACP> nacp; diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 044067a5b..575330a86 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -16,8 +16,8 @@ #include "core/file_sys/patch_manager.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/nso.h" #include "core/memory.h" #include "core/settings.h" @@ -37,7 +37,7 @@ static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size."); std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, const NSOSegmentHeader& header) { - const std::vector<u8> uncompressed_data = + std::vector<u8> uncompressed_data = Common::Compression::DecompressDataLZ4(compressed_data, header.size); ASSERT_MSG(uncompressed_data.size() == header.size, "{} != {}", header.size, @@ -47,7 +47,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, } constexpr u32 PageAlignSize(u32 size) { - return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } } // Anonymous namespace @@ -73,7 +73,7 @@ FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base, - bool should_pass_arguments, + bool should_pass_arguments, bool load_into_process, std::optional<FileSys::PatchManager> pm) { if (file.GetSize() < sizeof(NSOHeader)) { return {}; @@ -97,21 +97,17 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, if (nso_header.IsSegmentCompressed(i)) { data = DecompressSegment(data, nso_header.segments[i]); } - program_image.resize(nso_header.segments[i].location + - PageAlignSize(static_cast<u32>(data.size()))); + program_image.resize(nso_header.segments[i].location + static_cast<u32>(data.size())); std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(), data.size()); codeset.segments[i].addr = nso_header.segments[i].location; codeset.segments[i].offset = nso_header.segments[i].location; - codeset.segments[i].size = PageAlignSize(static_cast<u32>(data.size())); + codeset.segments[i].size = nso_header.segments[i].size; } - if (should_pass_arguments) { - std::vector<u8> arg_data{Settings::values.program_args.begin(), - Settings::values.program_args.end()}; - if (arg_data.empty()) { - arg_data.resize(NSO_ARGUMENT_DEFAULT_SIZE); - } + if (should_pass_arguments && !Settings::values.program_args.empty()) { + const auto arg_data{Settings::values.program_args}; + codeset.DataSegment().size += NSO_ARGUMENT_DATA_ALLOCATION_SIZE; NSOArgumentHeader args_header{ 
NSO_ARGUMENT_DATA_ALLOCATION_SIZE, static_cast<u32_le>(arg_data.size()), {}}; @@ -123,24 +119,15 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, arg_data.size()); } - // MOD header pointer is at .text offset + 4 - u32 module_offset; - std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); - - // Read MOD header - MODHeader mod_header{}; - // Default .bss to size in segment header if MOD0 section doesn't exist - u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; - std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader)); - const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; - if (has_mod_header) { - // Resize program image to include .bss section and page align each section - bss_size = PageAlignSize(mod_header.bss_end_offset - mod_header.bss_start_offset); - } - codeset.DataSegment().size += bss_size; - const u32 image_size{PageAlignSize(static_cast<u32>(program_image.size()) + bss_size)}; + codeset.DataSegment().size += nso_header.segments[2].bss_size; + const u32 image_size{ + PageAlignSize(static_cast<u32>(program_image.size()) + nso_header.segments[2].bss_size)}; program_image.resize(image_size); + for (std::size_t i = 0; i < nso_header.segments.size(); ++i) { + codeset.segments[i].size = PageAlignSize(codeset.segments[i].size); + } + // Apply patches if necessary if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { std::vector<u8> pi_header; @@ -154,6 +141,11 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data()); } + // If we aren't actually loading (i.e. just computing the process code layout), we are done + if (!load_into_process) { + return load_base + image_size; + } + // Apply cheats if they exist and the program has a valid title ID if (pm) { auto& system = Core::System::GetInstance(); @@ -182,8 +174,8 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) { modules.clear(); // Load module - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); - if (!LoadModule(process, *file, base_address, true)) { + const VAddr base_address = process.PageTable().GetCodeRegionStart(); + if (!LoadModule(process, *file, base_address, true, true)) { return {ResultStatus::ErrorLoadingNSO, {}}; } @@ -192,7 +184,7 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) { is_loaded = true; return {ResultStatus::Success, - LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; + LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; } ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) { diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h index d2d600cd9..b210830f0 100644 --- a/src/core/loader/nso.h +++ b/src/core/loader/nso.h @@ -56,8 +56,6 @@ static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size."); static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable."); constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; -// NOTE: Official software default argument state is unverified. 
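Taken together, the loader changes above introduce a two-pass scheme: LoadModule gains a load_into_process flag, and when it is false the function stops after computing the aligned image size and returns load_base + image_size without touching the process. Condensed from the deconstructed_rom_directory.cpp hunk, the flow is:

    // Pass 1: dry-run every module to accumulate the total code size.
    std::size_t code_size{};
    for (const auto& module : static_modules) {
        const FileSys::VirtualFile module_file{dir->GetFile(module)};
        if (!module_file) {
            continue;
        }
        const bool should_pass_arguments{std::strcmp(module, "rtld") == 0};
        const auto next{AppLoader_NSO::LoadModule(process, *module_file, code_size,
                                                  should_pass_arguments,
                                                  /*load_into_process=*/false)};
        if (!next) {
            return {ResultStatus::ErrorLoadingNSO, {}};
        }
        code_size = *next; // only the layout advances; nothing is mapped yet
    }

    // The kernel can now size the code region before anything is mapped.
    if (process.LoadFromMetadata(metadata, code_size).IsError()) {
        return {ResultStatus::ErrorUnableToParseKernelMetadata, {}};
    }

    // Pass 2 (not repeated here) reruns the loop with load_into_process = true,
    // starting from process.PageTable().GetCodeRegionStart().

The same pattern appears in the other loaders: elf.cpp and nro.cpp now call LoadFromMetadata with ProgramMetadata::GetDefault() and the computed image size, so every loader sizes the code region before mapping anything into it.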
-constexpr u64 NSO_ARGUMENT_DEFAULT_SIZE = 1; struct NSOArgumentHeader { u32_le allocated_size; @@ -84,6 +82,7 @@ public: static std::optional<VAddr> LoadModule(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base, bool should_pass_arguments, + bool load_into_process, std::optional<FileSys::PatchManager> pm = {}); LoadResult Load(Kernel::Process& process) override; diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 6061d37ae..9d87045a0 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -14,13 +14,14 @@ #include "common/swap.h" #include "core/arm/arm_interface.h" #include "core/core.h" +#include "core/device_memory.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "video_core/gpu.h" -namespace Memory { +namespace Core::Memory { // Implementation class used to keep the specifics of the memory subsystem hidden // from outside classes. This also allows modification to the internals of the memory @@ -29,9 +30,9 @@ struct Memory::Impl { explicit Impl(Core::System& system_) : system{system_} {} void SetCurrentPageTable(Kernel::Process& process) { - current_page_table = &process.VMManager().page_table; + current_page_table = &process.PageTable().PageTableImpl(); - const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); + const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); @@ -39,12 +40,7 @@ struct Memory::Impl { system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); } - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset) { - MapMemoryRegion(page_table, base, size, memory.data() + offset); - } - - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); @@ -52,46 +48,27 @@ struct Memory::Impl { void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer mmio_handler) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, - Common::PageType::Special); - - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, - std::move(mmio_handler)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); } void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, - Common::PageType::Unmapped); - - const auto interval = 
boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - page_table.special_regions.erase(interval); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped); } void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer hook) { - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); } void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer hook) { - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.subtract( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); } bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; if (page_pointer != nullptr) { @@ -113,55 +90,28 @@ struct Memory::Impl { return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); } - /** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process - */ - u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { - const auto& vm_manager = process.VMManager(); + u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { + const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; - const auto it = vm_manager.FindVMA(vaddr); - DEBUG_ASSERT(vm_manager.IsValidHandle(it)); - - u8* direct_pointer = nullptr; - const auto& vma = it->second; - switch (vma.type) { - case Kernel::VMAType::AllocatedMemoryBlock: - direct_pointer = vma.backing_block->data() + vma.offset; - break; - case Kernel::VMAType::BackingMemory: - direct_pointer = vma.backing_memory; - break; - case Kernel::VMAType::Free: - return nullptr; - default: - UNREACHABLE(); + if (!paddr) { + return {}; } - return direct_pointer + (vaddr - vma.base); + return system.DeviceMemory().GetPointer(paddr) + vaddr; } - /** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process. 
- */ - u8* GetPointerFromVMA(VAddr vaddr) { - return GetPointerFromVMA(*system.CurrentProcess(), vaddr); - } - - u8* GetPointer(const VAddr vaddr) { - u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer != nullptr) { + u8* GetPointer(const VAddr vaddr) const { + u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]}; + if (page_pointer) { return page_pointer + vaddr; } if (current_page_table->attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) { - return GetPointerFromVMA(vaddr); + return GetPointerFromRasterizerCachedMemory(vaddr); } - LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); - return nullptr; + return {}; } u8 Read8(const VAddr addr) { @@ -169,15 +119,33 @@ struct Memory::Impl { } u16 Read16(const VAddr addr) { - return Read<u16_le>(addr); + if ((addr & 1) == 0) { + return Read<u16_le>(addr); + } else { + const u8 a{Read<u8>(addr)}; + const u8 b{Read<u8>(addr + sizeof(u8))}; + return (static_cast<u16>(b) << 8) | a; + } } u32 Read32(const VAddr addr) { - return Read<u32_le>(addr); + if ((addr & 3) == 0) { + return Read<u32_le>(addr); + } else { + const u16 a{Read16(addr)}; + const u16 b{Read16(addr + sizeof(u16))}; + return (static_cast<u32>(b) << 16) | a; + } } u64 Read64(const VAddr addr) { - return Read<u64_le>(addr); + if ((addr & 7) == 0) { + return Read<u64_le>(addr); + } else { + const u32 a{Read32(addr)}; + const u32 b{Read32(addr + sizeof(u32))}; + return (static_cast<u64>(b) << 32) | a; + } } void Write8(const VAddr addr, const u8 data) { @@ -185,15 +153,30 @@ struct Memory::Impl { } void Write16(const VAddr addr, const u16 data) { - Write<u16_le>(addr, data); + if ((addr & 1) == 0) { + Write<u16_le>(addr, data); + } else { + Write<u8>(addr, static_cast<u8>(data)); + Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8)); + } } void Write32(const VAddr addr, const u32 data) { - Write<u32_le>(addr, data); + if ((addr & 3) == 0) { + Write<u32_le>(addr, data); + } else { + Write16(addr, static_cast<u16>(data)); + Write16(addr + sizeof(u16), static_cast<u16>(data >> 16)); + } } void Write64(const VAddr addr, const u64 data) { - Write<u64_le>(addr, data); + if ((addr & 7) == 0) { + Write<u64_le>(addr, data); + } else { + Write32(addr, static_cast<u32>(data)); + Write32(addr + sizeof(u32), static_cast<u32>(data >> 32)); + } } std::string ReadCString(VAddr vaddr, std::size_t max_length) { @@ -213,7 +196,7 @@ struct Memory::Impl { void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; @@ -241,7 +224,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().FlushRegion(current_vaddr, copy_amount); std::memcpy(dest_buffer, host_ptr, copy_amount); break; @@ -259,7 +242,7 @@ struct Memory::Impl { void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; @@ -287,7 +270,7 @@ 
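The Read16/Read32/Read64 and Write16/Write32/Write64 changes above stop assuming natural alignment: a misaligned access is split into two half-width accesses and recombined little-endian, recursing down to byte granularity in the worst case. A self-contained model of the read side, assuming a little-endian host (the equivalence the emulator's u16_le/u32_le types otherwise guarantee):

    #include <cstdint>
    #include <cstring>

    uint16_t Read16(const uint8_t* mem, uint64_t addr) {
        if ((addr & 1) == 0) {
            uint16_t v;
            std::memcpy(&v, mem + addr, sizeof(v)); // aligned fast path
            return v;
        }
        // Misaligned: compose from two byte reads, little-endian.
        return static_cast<uint16_t>((static_cast<uint16_t>(mem[addr + 1]) << 8) | mem[addr]);
    }

    uint32_t Read32(const uint8_t* mem, uint64_t addr) {
        if ((addr & 3) == 0) {
            uint32_t v;
            std::memcpy(&v, mem + addr, sizeof(v));
            return v;
        }
        // Misaligned: two half-width reads, each possibly splitting further.
        const uint32_t lo = Read16(mem, addr);
        const uint32_t hi = Read16(mem, addr + sizeof(uint16_t));
        return (hi << 16) | lo;
    }

The write side mirrors this exactly, storing the low half first and the high half at addr + sizeof(half).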
struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; std::memcpy(dest_buffer, host_ptr, copy_amount); break; } @@ -312,7 +295,7 @@ struct Memory::Impl { void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -338,7 +321,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().InvalidateRegion(current_vaddr, copy_amount); std::memcpy(host_ptr, src_buffer, copy_amount); break; @@ -356,7 +339,7 @@ struct Memory::Impl { void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -382,7 +365,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; std::memcpy(host_ptr, src_buffer, copy_amount); break; } @@ -406,7 +389,7 @@ struct Memory::Impl { } void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -432,7 +415,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().InvalidateRegion(current_vaddr, copy_amount); std::memset(host_ptr, 0, copy_amount); break; @@ -453,7 +436,7 @@ struct Memory::Impl { void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; std::size_t page_offset = src_addr & PAGE_MASK; @@ -479,7 +462,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().FlushRegion(current_vaddr, copy_amount); WriteBlock(process, dest_addr, host_ptr, copy_amount); break; @@ -512,7 +495,7 @@ struct Memory::Impl { u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { - Common::PageType& page_type = current_page_table->attributes[vaddr 
>> PAGE_BITS]; + Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]}; if (cached) { // Switch page type to cached if now cached @@ -544,7 +527,7 @@ struct Memory::Impl { // that this area is already unmarked as cached. break; case Common::PageType::RasterizerCachedMemory: { - u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); + u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)}; if (pointer == nullptr) { // It's possible that this function has been called while updating the // pagetable after unmapping a VMA. In that case the underlying VMA will no @@ -573,9 +556,9 @@ struct Memory::Impl { * @param memory The memory to map. * @param type The page type to map the memory as. */ - void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, + void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target, Common::PageType type) { - LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, + LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE, (base + size) * PAGE_SIZE); // During boot, current_page_table might not be set yet, in which case we need not flush @@ -593,19 +576,26 @@ struct Memory::Impl { ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", base + page_table.pointers.size()); - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); + if (!target) { + while (base != end) { + page_table.pointers[base] = nullptr; + page_table.attributes[base] = type; + page_table.backing_addr[base] = 0; - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, - memory); + base += 1; + } } else { while (base != end) { - page_table.pointers[base] = memory - (base << PAGE_BITS); + page_table.pointers[base] = + system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS); + page_table.attributes[base] = type; + page_table.backing_addr[base] = target - (base << PAGE_BITS); + ASSERT_MSG(page_table.pointers[base], "memory mapping base yield a nullptr within the table"); base += 1; - memory += PAGE_SIZE; + target += PAGE_SIZE; } } } @@ -640,7 +630,7 @@ struct Memory::Impl { ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); break; case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; system.GPU().FlushRegion(vaddr, sizeof(T)); T value; std::memcpy(&value, host_ptr, sizeof(T)); @@ -682,7 +672,7 @@ struct Memory::Impl { ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); break; case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr{GetPointerFromVMA(vaddr)}; + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; system.GPU().InvalidateRegion(vaddr, sizeof(T)); std::memcpy(host_ptr, &data, sizeof(T)); break; @@ -703,12 +693,7 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) { impl->SetCurrentPageTable(process); } -void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset) { - impl->MapMemoryRegion(page_table, base, size, memory, offset); -} - -void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { +void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { 
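// Sketch of the biased-entry convention that MapPages establishes above: each
// pointers[] / backing_addr[] slot stores its base minus (page << PAGE_BITS),
// so a lookup needs no split into page index and page offset:
//   host  = current_page_table->pointers[vaddr >> PAGE_BITS] + vaddr;
//   paddr = current_page_table->backing_addr[vaddr >> PAGE_BITS] + vaddr;
// which is exactly what GetPointer and GetPointerFromRasterizerCachedMemory
// compute earlier in this file.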
impl->MapMemoryRegion(page_table, base, size, target); } @@ -845,4 +830,4 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory.h b/src/core/memory.h index b92d678a4..9292f3b0a 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -23,7 +23,7 @@ class PhysicalMemory; class Process; } // namespace Kernel -namespace Memory { +namespace Core::Memory { /** * Page size used by the ARM architecture. This is the smallest granularity with which memory can @@ -67,19 +67,6 @@ public: void SetCurrentPageTable(Kernel::Process& process); /** - * Maps an physical buffer onto a region of the emulated process address space. - * - * @param page_table The page table of the emulated process. - * @param base The address to start mapping at. Must be page-aligned. - * @param size The amount of bytes to map. Must be page-aligned. - * @param memory Physical buffer with the memory backing the mapping. Must be of length - * at least `size + offset`. - * @param offset The offset within the physical memory. Must be page-aligned. - */ - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset); - - /** * Maps an allocated buffer onto a region of the emulated process address space. * * @param page_table The page table of the emulated process. @@ -88,7 +75,7 @@ public: * @param target Buffer with the memory backing the mapping. Must be of length at least * `size`. */ - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target); /** * Maps a region of the emulated process address space as a IO region. 
@@ -503,4 +490,4 @@ private: /// Determines if the given VAddr is a kernel address bool IsKernelVirtualAddress(VAddr vaddr); -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp index 4472500d2..b139e8465 100644 --- a/src/core/memory/cheat_engine.cpp +++ b/src/core/memory/cheat_engine.cpp @@ -10,13 +10,15 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/service/hid/controllers/npad.h" #include "core/hle/service/hid/hid.h" #include "core/hle/service/sm/sm.h" +#include "core/memory.h" #include "core/memory/cheat_engine.h" -namespace Memory { +namespace Core::Memory { constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12); constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; @@ -194,11 +196,12 @@ void CheatEngine::Initialize() { metadata.process_id = system.CurrentProcess()->GetProcessID(); metadata.title_id = system.CurrentProcess()->GetTitleID(); - const auto& vm_manager = system.CurrentProcess()->VMManager(); - metadata.heap_extents = {vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionSize()}; - metadata.address_space_extents = {vm_manager.GetAddressSpaceBaseAddress(), - vm_manager.GetAddressSpaceSize()}; - metadata.alias_extents = {vm_manager.GetMapRegionBaseAddress(), vm_manager.GetMapRegionSize()}; + const auto& page_table = system.CurrentProcess()->PageTable(); + metadata.heap_extents = {page_table.GetHeapRegionStart(), page_table.GetHeapRegionSize()}; + metadata.address_space_extents = {page_table.GetAddressSpaceStart(), + page_table.GetAddressSpaceSize()}; + metadata.alias_extents = {page_table.GetAliasCodeRegionStart(), + page_table.GetAliasCodeRegionSize()}; is_pending_reload.exchange(true); } @@ -230,4 +233,4 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) { core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event); } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h index 3d6b2298a..2649423f8 100644 --- a/src/core/memory/cheat_engine.h +++ b/src/core/memory/cheat_engine.h @@ -20,7 +20,7 @@ class CoreTiming; struct EventType; } // namespace Core::Timing -namespace Memory { +namespace Core::Memory { class StandardVmCallbacks : public DmntCheatVm::Callbacks { public: @@ -84,4 +84,4 @@ private: Core::System& system; }; -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_types.h b/src/core/memory/dmnt_cheat_types.h index bf68fa0fe..5e60733dc 100644 --- a/src/core/memory/dmnt_cheat_types.h +++ b/src/core/memory/dmnt_cheat_types.h @@ -26,7 +26,7 @@ #include "common/common_types.h" -namespace Memory { +namespace Core::Memory { struct MemoryRegionExtents { u64 base{}; @@ -55,4 +55,4 @@ struct CheatEntry { CheatDefinition definition{}; }; -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp index 5bb26a36f..fb9f36bfd 100644 --- a/src/core/memory/dmnt_cheat_vm.cpp +++ b/src/core/memory/dmnt_cheat_vm.cpp @@ -27,7 +27,7 @@ #include "core/memory/dmnt_cheat_types.h" #include "core/memory/dmnt_cheat_vm.h" -namespace Memory { +namespace Core::Memory { DmntCheatVm::DmntCheatVm(std::unique_ptr<Callbacks> callbacks) : callbacks(std::move(callbacks)) {} @@ -1210,4 +1210,4 @@ void 
DmntCheatVm::Execute(const CheatProcessMetadata& metadata) { } } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_vm.h b/src/core/memory/dmnt_cheat_vm.h index c36212cf1..8351fd798 100644 --- a/src/core/memory/dmnt_cheat_vm.h +++ b/src/core/memory/dmnt_cheat_vm.h @@ -30,7 +30,7 @@ #include "common/common_types.h" #include "core/memory/dmnt_cheat_types.h" -namespace Memory { +namespace Core::Memory { enum class CheatVmOpcodeType : u32 { StoreStatic = 0, @@ -318,4 +318,4 @@ private: MemoryAccessType mem_type, u64 rel_address); }; -}; // namespace Memory +}; // namespace Core::Memory diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp index 85ac81ef7..558cbe6d7 100644 --- a/src/core/reporter.cpp +++ b/src/core/reporter.cpp @@ -16,9 +16,11 @@ #include "core/arm/arm_interface.h" #include "core/core.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/result.h" #include "core/hle/service/lm/manager.h" +#include "core/memory.h" #include "core/reporter.h" #include "core/settings.h" @@ -108,14 +110,13 @@ json GetProcessorStateData(const std::string& architecture, u64 entry_point, u64 json GetProcessorStateDataAuto(Core::System& system) { const auto* process{system.CurrentProcess()}; - const auto& vm_manager{process->VMManager()}; auto& arm{system.CurrentArmInterface()}; Core::ARM_Interface::ThreadContext64 context{}; arm.SaveContext(context); return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32", - vm_manager.GetCodeRegionBaseAddress(), context.sp, context.pc, + process->PageTable().GetCodeRegionStart(), context.sp, context.pc, context.pstate, context.cpu_registers); } @@ -147,7 +148,8 @@ json GetFullDataAuto(const std::string& timestamp, u64 title_id, Core::System& s } template <bool read_value, typename DescriptorType> -json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memory::Memory& memory) { +json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, + Core::Memory::Memory& memory) { auto buffer_out = json::array(); for (const auto& desc : buffer) { auto entry = json{ @@ -167,7 +169,7 @@ json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memor return buffer_out; } -json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Memory::Memory& memory) { +json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Core::Memory::Memory& memory) { json out; auto cmd_buf = json::array(); diff --git a/src/core/settings.cpp b/src/core/settings.cpp index c1282cb80..cd6c257f5 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp @@ -92,7 +92,7 @@ void LogSettings() { LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); - LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); + LogSetting("Renderer_GPUAccuracyLevel", Settings::values.gpu_accuracy); LogSetting("Renderer_UseAsynchronousGpuEmulation", Settings::values.use_asynchronous_gpu_emulation); LogSetting("Renderer_UseVsync", Settings::values.use_vsync); @@ -109,4 +109,12 @@ void LogSettings() { LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local); } +bool IsGPULevelExtreme() { + return values.gpu_accuracy == GPUAccuracy::Extreme; +} + +bool IsGPULevelHigh() { + return values.gpu_accuracy == 
GPUAccuracy::Extreme || values.gpu_accuracy == GPUAccuracy::High; +} + } // namespace Settings diff --git a/src/core/settings.h b/src/core/settings.h index 79ec01731..7d09253f5 100644 --- a/src/core/settings.h +++ b/src/core/settings.h @@ -376,6 +376,12 @@ enum class RendererBackend { Vulkan = 1, }; +enum class GPUAccuracy : u32 { + Normal = 0, + High = 1, + Extreme = 2, +}; + struct Values { // System bool use_docked_mode; @@ -436,7 +442,7 @@ struct Values { bool use_frame_limit; u16 frame_limit; bool use_disk_shader_cache; - bool use_accurate_gpu_emulation; + GPUAccuracy gpu_accuracy; bool use_asynchronous_gpu_emulation; bool use_vsync; bool force_30fps_mode; @@ -464,6 +470,7 @@ struct Values { bool dump_nso; bool reporting_services; bool quest_flag; + bool disable_cpu_opt; // BCAT std::string bcat_backend; @@ -479,6 +486,9 @@ struct Values { std::map<u64, std::vector<std::string>> disabled_addons; } extern values; +bool IsGPULevelExtreme(); +bool IsGPULevelHigh(); + void Apply(); void LogSettings(); } // namespace Settings diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index fd5a3ee9f..1c3b03a1c 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -56,6 +56,18 @@ static const char* TranslateRenderer(Settings::RendererBackend backend) { return "Unknown"; } +static const char* TranslateGPUAccuracyLevel(Settings::GPUAccuracy backend) { + switch (backend) { + case Settings::GPUAccuracy::Normal: + return "Normal"; + case Settings::GPUAccuracy::High: + return "High"; + case Settings::GPUAccuracy::Extreme: + return "Extreme"; + } + return "Unknown"; +} + u64 GetTelemetryId() { u64 telemetry_id{}; const std::string filename{FileUtil::GetUserPath(FileUtil::UserPath::ConfigDir) + @@ -184,8 +196,8 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) { AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit); AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit); AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); - AddField(field_type, "Renderer_UseAccurateGpuEmulation", - Settings::values.use_accurate_gpu_emulation); + AddField(field_type, "Renderer_GPUAccuracyLevel", + TranslateGPUAccuracyLevel(Settings::values.gpu_accuracy)); AddField(field_type, "Renderer_UseAsynchronousGpuEmulation", Settings::values.use_asynchronous_gpu_emulation); AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync); diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp index 1e060f009..b2c6c537e 100644 --- a/src/core/tools/freezer.cpp +++ b/src/core/tools/freezer.cpp @@ -16,7 +16,7 @@ namespace { constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); -u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) { +u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) { switch (width) { case 1: return memory.Read8(addr); @@ -32,7 +32,7 @@ u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) { } } -void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) { +void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 value) { switch (width) { case 1: memory.Write8(addr, static_cast<u8>(value)); @@ -53,7 +53,7 @@ void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) } // Anonymous namespace -Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_) 
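The settings change above replaces the use_accurate_gpu_emulation bool with an ordered GPUAccuracy enum plus two helper predicates, and the telemetry field is renamed to match. Call sites gate on a level through the helpers rather than comparing the raw enum; the buffer cache further down uses it like this (condensed from that hunk):

    if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
        MarkForAsyncFlush(map); // only High/Extreme pay for async-flush tracking
    }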
+Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_) : core_timing{core_timing_}, memory{memory_} { event = Core::Timing::CreateEvent( "MemoryFreezer::FrameCallback", diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h index 916339c6c..62fc6aa6c 100644 --- a/src/core/tools/freezer.h +++ b/src/core/tools/freezer.h @@ -16,7 +16,7 @@ class CoreTiming; struct EventType; } // namespace Core::Timing -namespace Memory { +namespace Core::Memory { class Memory; } @@ -38,7 +38,7 @@ public: u64 value; }; - explicit Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_); + explicit Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_); ~Freezer(); // Enables or disables the entire memory freezer. @@ -82,7 +82,7 @@ private: std::shared_ptr<Core::Timing::EventType> event; Core::Timing::CoreTiming& core_timing; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Tools diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index 17043346b..e54674d11 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp @@ -6,6 +6,7 @@ #include "common/page_table.h" #include "core/core.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/memory.h" #include "tests/core/arm/arm_test_common.h" @@ -18,12 +19,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_) auto& system = Core::System::GetInstance(); auto process = Kernel::Process::Create(system, "", Kernel::Process::ProcessType::Userland); - page_table = &process->VMManager().page_table; - - std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); - page_table->special_regions.clear(); - std::fill(page_table->attributes.begin(), page_table->attributes.end(), - Common::PageType::Unmapped); + page_table = &process->PageTable().PageTableImpl(); system.Memory().MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); system.Memory().MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 258d58eba..8ede4ba9b 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -23,6 +23,7 @@ add_library(video_core STATIC engines/shader_bytecode.h engines/shader_header.h engines/shader_type.h + fence_manager.h gpu.cpp gpu.h gpu_asynch.cpp @@ -51,6 +52,8 @@ add_library(video_core STATIC renderer_opengl/gl_buffer_cache.h renderer_opengl/gl_device.cpp renderer_opengl/gl_device.h + renderer_opengl/gl_fence_manager.cpp + renderer_opengl/gl_fence_manager.h renderer_opengl/gl_framebuffer_cache.cpp renderer_opengl/gl_framebuffer_cache.h renderer_opengl/gl_rasterizer.cpp @@ -160,6 +163,8 @@ if (ENABLE_VULKAN) renderer_vulkan/fixed_pipeline_state.h renderer_vulkan/maxwell_to_vk.cpp renderer_vulkan/maxwell_to_vk.h + renderer_vulkan/nsight_aftermath_tracker.cpp + renderer_vulkan/nsight_aftermath_tracker.h renderer_vulkan/renderer_vulkan.h renderer_vulkan/renderer_vulkan.cpp renderer_vulkan/vk_blit_screen.cpp @@ -174,6 +179,8 @@ if (ENABLE_VULKAN) renderer_vulkan/vk_descriptor_pool.h renderer_vulkan/vk_device.cpp renderer_vulkan/vk_device.h + renderer_vulkan/vk_fence_manager.cpp + renderer_vulkan/vk_fence_manager.h renderer_vulkan/vk_graphics_pipeline.cpp renderer_vulkan/vk_graphics_pipeline.h renderer_vulkan/vk_image.cpp @@ -213,19 +220,30 @@ if (ENABLE_VULKAN) renderer_vulkan/wrapper.cpp renderer_vulkan/wrapper.h ) 
- - target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include) - target_compile_definitions(video_core PRIVATE HAS_VULKAN) endif() create_target_directory_groups(video_core) target_link_libraries(video_core PUBLIC common core) target_link_libraries(video_core PRIVATE glad) + if (ENABLE_VULKAN) + target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include) + target_compile_definitions(video_core PRIVATE HAS_VULKAN) target_link_libraries(video_core PRIVATE sirit) endif() +if (ENABLE_NSIGHT_AFTERMATH) + if (NOT DEFINED ENV{NSIGHT_AFTERMATH_SDK}) + message(ERROR "Environment variable NSIGHT_AFTERMATH_SDK has to be provided") + endif() + if (NOT WIN32) + message(ERROR "Nsight Aftermath doesn't support non-Windows platforms") + endif() + target_compile_definitions(video_core PRIVATE HAS_NSIGHT_AFTERMATH) + target_include_directories(video_core PRIVATE "$ENV{NSIGHT_AFTERMATH_SDK}/include") +endif() + if (MSVC) target_compile_options(video_core PRIVATE /we4267) else() diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 83e7a1cde..510f11089 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -5,6 +5,7 @@ #pragma once #include <array> +#include <list> #include <memory> #include <mutex> #include <unordered_map> @@ -18,8 +19,10 @@ #include "common/alignment.h" #include "common/common_types.h" +#include "common/logging/log.h" #include "core/core.h" #include "core/memory.h" +#include "core/settings.h" #include "video_core/buffer_cache/buffer_block.h" #include "video_core/buffer_cache/map_interval.h" #include "video_core/memory_manager.h" @@ -79,6 +82,9 @@ public: auto map = MapAddress(block, gpu_addr, cpu_addr, size); if (is_written) { map->MarkAsModified(true, GetModifiedTicks()); + if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) { + MarkForAsyncFlush(map); + } if (!map->IsWritten()) { map->MarkAsWritten(true); MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); @@ -137,11 +143,22 @@ public: }); for (auto& object : objects) { if (object->IsModified() && object->IsRegistered()) { + mutex.unlock(); FlushMap(object); + mutex.lock(); } } } + bool MustFlushRegion(VAddr addr, std::size_t size) { + std::lock_guard lock{mutex}; + + const std::vector<MapInterval> objects = GetMapsInRange(addr, size); + return std::any_of(objects.cbegin(), objects.cend(), [](const MapInterval& map) { + return map->IsModified() && map->IsRegistered(); + }); + } + /// Mark the specified region as being invalidated void InvalidateRegion(VAddr addr, u64 size) { std::lock_guard lock{mutex}; @@ -154,6 +171,77 @@ public: } } + void OnCPUWrite(VAddr addr, std::size_t size) { + std::lock_guard lock{mutex}; + + for (const auto& object : GetMapsInRange(addr, size)) { + if (object->IsMemoryMarked() && object->IsRegistered()) { + UnmarkMemory(object); + object->SetSyncPending(true); + marked_for_unregister.emplace_back(object); + } + } + } + + void SyncGuestHost() { + std::lock_guard lock{mutex}; + + for (const auto& object : marked_for_unregister) { + if (object->IsRegistered()) { + object->SetSyncPending(false); + Unregister(object); + } + } + marked_for_unregister.clear(); + } + + void CommitAsyncFlushes() { + if (uncommitted_flushes) { + auto commit_list = std::make_shared<std::list<MapInterval>>(); + for (auto& map : *uncommitted_flushes) { + if (map->IsRegistered() && map->IsModified()) { + // TODO(Blinkhawk): Implement 
backend asynchronous flushing + // AsyncFlushMap(map) + commit_list->push_back(map); + } + } + if (!commit_list->empty()) { + committed_flushes.push_back(commit_list); + } else { + committed_flushes.emplace_back(); + } + } else { + committed_flushes.emplace_back(); + } + uncommitted_flushes.reset(); + } + + bool ShouldWaitAsyncFlushes() const { + return !committed_flushes.empty() && committed_flushes.front() != nullptr; + } + + bool HasUncommittedFlushes() const { + return uncommitted_flushes != nullptr; + } + + void PopAsyncFlushes() { + if (committed_flushes.empty()) { + return; + } + auto& flush_list = committed_flushes.front(); + if (!flush_list) { + committed_flushes.pop_front(); + return; + } + for (MapInterval& map : *flush_list) { + if (map->IsRegistered()) { + // TODO(Blinkhawk): Replace this for reading the asynchronous flush + FlushMap(map); + } + } + committed_flushes.pop_front(); + } + virtual BufferType GetEmptyBuffer(std::size_t size) = 0; protected: @@ -196,17 +284,30 @@ protected: const IntervalType interval{new_map->GetStart(), new_map->GetEnd()}; mapped_addresses.insert({interval, new_map}); rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1); + new_map->SetMemoryMarked(true); if (inherit_written) { MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1); new_map->MarkAsWritten(true); } } - /// Unregisters an object from the cache - void Unregister(MapInterval& map) { + void UnmarkMemory(const MapInterval& map) { + if (!map->IsMemoryMarked()) { + return; + } const std::size_t size = map->GetEnd() - map->GetStart(); rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1); + map->SetMemoryMarked(false); + } + + /// Unregisters an object from the cache + void Unregister(const MapInterval& map) { + UnmarkMemory(map); map->MarkAsRegistered(false); + if (map->IsSyncPending()) { + marked_for_unregister.remove(map); + map->SetSyncPending(false); + } if (map->IsWritten()) { UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); } @@ -264,6 +365,9 @@ private: MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr); if (modified_inheritance) { new_map->MarkAsModified(true, GetModifiedTicks()); + if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) { + MarkForAsyncFlush(new_map); + } } Register(new_map, write_inheritance); return new_map; @@ -450,6 +554,13 @@ private: return false; } + void MarkForAsyncFlush(MapInterval& map) { + if (!uncommitted_flushes) { + uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>(); + } + uncommitted_flushes->insert(map); + } + VideoCore::RasterizerInterface& rasterizer; Core::System& system; @@ -479,6 +590,10 @@ private: u64 modified_ticks = 0; std::vector<u8> staging_buffer; + std::list<MapInterval> marked_for_unregister; + + std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{}; + std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes; std::recursive_mutex mutex; }; diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h index b0956029d..29d8b26f3 100644 --- a/src/video_core/buffer_cache/map_interval.h +++ b/src/video_core/buffer_cache/map_interval.h @@ -46,6 +46,22 @@ public: return is_registered; } + void SetMemoryMarked(bool is_memory_marked_) { + is_memory_marked = is_memory_marked_; + } + + bool IsMemoryMarked() const { + return is_memory_marked; + } + + void SetSyncPending(bool is_sync_pending_) { + is_sync_pending = is_sync_pending_; + } + + bool IsSyncPending() const { + return 
is_sync_pending; + } + VAddr GetStart() const { return start; } @@ -83,6 +99,8 @@ private: bool is_written{}; bool is_modified{}; bool is_registered{}; + bool is_memory_marked{}; + bool is_sync_pending{}; u64 ticks{}; }; diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 713c14182..324dafdcd 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -12,7 +12,7 @@ namespace Tegra { -DmaPusher::DmaPusher(GPU& gpu) : gpu(gpu) {} +DmaPusher::DmaPusher(Core::System& system, GPU& gpu) : gpu{gpu}, system{system} {} DmaPusher::~DmaPusher() = default; @@ -21,17 +21,20 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128, void DmaPusher::DispatchCalls() { MICROPROFILE_SCOPE(DispatchCalls); + gpu.SyncGuestHost(); // On entering GPU code, assume all memory may be touched by the ARM core. gpu.Maxwell3D().OnMemoryWrite(); dma_pushbuffer_subindex = 0; - while (Core::System::GetInstance().IsPoweredOn()) { + while (system.IsPoweredOn()) { if (!Step()) { break; } } gpu.FlushCommands(); + gpu.SyncGuestHost(); + gpu.OnCommandListEnd(); } bool DmaPusher::Step() { diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 6ab06518f..d6188614a 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -10,6 +10,10 @@ #include "common/bit_field.h" #include "common/common_types.h" +namespace Core { +class System; +} + namespace Tegra { enum class SubmissionMode : u32 { @@ -56,7 +60,7 @@ using CommandList = std::vector<Tegra::CommandListHeader>; */ class DmaPusher { public: - explicit DmaPusher(GPU& gpu); + explicit DmaPusher(Core::System& system, GPU& gpu); ~DmaPusher(); void Push(CommandList&& entries) { @@ -72,8 +76,6 @@ private: void CallMethod(u32 argument) const; - GPU& gpu; - std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed @@ -92,6 +94,9 @@ private: GPUVAddr dma_mget{}; ///< main pushbuffer last read address bool ib_enable{true}; ///< IB mode enabled + + GPU& gpu; + Core::System& system; }; } // namespace Tegra diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index ba63b44b4..2824ed707 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -92,6 +92,10 @@ void Maxwell3D::InitializeRegisterDefaults() { color_mask.A.Assign(1); } + for (auto& format : regs.vertex_attrib_format) { + format.constant.Assign(1); + } + // NVN games expect these values to be enabled at boot regs.rasterize_enable = 1; regs.rt_separate_frag_data = 1; @@ -400,7 +404,11 @@ void Maxwell3D::ProcessQueryGet() { switch (regs.query.query_get.operation) { case Regs::QueryOperation::Release: - StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); + if (regs.query.query_get.fence == 1) { + rasterizer.SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence); + } else { + StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); + } break; case Regs::QueryOperation::Acquire: // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that @@ -479,7 +487,7 @@ void Maxwell3D::ProcessSyncPoint() { const u32 increment = regs.sync_info.increment.Value(); [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value(); if (increment) { - system.GPU().IncrementSyncPoint(sync_point); + rasterizer.SignalSyncPoint(sync_point); } 
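// Routing the increment through the rasterizer instead of bumping the sync
// point directly is deliberate: in asynchronous GPU mode the fence manager
// (video_core/fence_manager.h below) queues a host fence and increments the
// syncpoint only once the host GPU has actually reached it.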
} diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 5cf6a4cc3..59d5752d2 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -1149,7 +1149,7 @@ public: /// Returns whether the vertex array specified by index is supposed to be /// accessed per instance or not. - bool IsInstancingEnabled(u32 index) const { + bool IsInstancingEnabled(std::size_t index) const { return is_instanced[index]; } } instanced_arrays; diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index c2610f992..3bfed6ab8 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -104,8 +104,13 @@ void MaxwellDMA::HandleCopy() { write_buffer.resize(dst_size); } - memory_manager.ReadBlock(source, read_buffer.data(), src_size); - memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); + if (Settings::IsGPULevelExtreme()) { + memory_manager.ReadBlock(source, read_buffer.data(), src_size); + memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); + } else { + memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size); + memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size); + } Texture::UnswizzleSubrect( regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel, @@ -136,7 +141,7 @@ void MaxwellDMA::HandleCopy() { write_buffer.resize(dst_size); } - if (Settings::values.use_accurate_gpu_emulation) { + if (Settings::IsGPULevelExtreme()) { memory_manager.ReadBlock(source, read_buffer.data(), src_size); memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); } else { diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h new file mode 100644 index 000000000..dabd1588c --- /dev/null +++ b/src/video_core/fence_manager.h @@ -0,0 +1,170 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <algorithm> +#include <array> +#include <memory> +#include <queue> + +#include "common/assert.h" +#include "common/common_types.h" +#include "core/core.h" +#include "core/memory.h" +#include "core/settings.h" +#include "video_core/gpu.h" +#include "video_core/memory_manager.h" +#include "video_core/rasterizer_interface.h" + +namespace VideoCommon { + +class FenceBase { +public: + FenceBase(u32 payload, bool is_stubbed) + : address{}, payload{payload}, is_semaphore{false}, is_stubbed{is_stubbed} {} + + FenceBase(GPUVAddr address, u32 payload, bool is_stubbed) + : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {} + + GPUVAddr GetAddress() const { + return address; + } + + u32 GetPayload() const { + return payload; + } + + bool IsSemaphore() const { + return is_semaphore; + } + +private: + GPUVAddr address; + u32 payload; + bool is_semaphore; + +protected: + bool is_stubbed; +}; + +template <typename TFence, typename TTextureCache, typename TTBufferCache, typename TQueryCache> +class FenceManager { +public: + void SignalSemaphore(GPUVAddr addr, u32 value) { + TryReleasePendingFences(); + const bool should_flush = ShouldFlush(); + CommitAsyncFlushes(); + TFence new_fence = CreateFence(addr, value, !should_flush); + fences.push(new_fence); + QueueFence(new_fence); + if (should_flush) { + rasterizer.FlushCommands(); + } + rasterizer.SyncGuestHost(); + } + + void SignalSyncPoint(u32 value) { + TryReleasePendingFences(); + const bool should_flush = ShouldFlush(); + CommitAsyncFlushes(); + TFence new_fence = CreateFence(value, !should_flush); + fences.push(new_fence); + QueueFence(new_fence); + if (should_flush) { + rasterizer.FlushCommands(); + } + rasterizer.SyncGuestHost(); + } + + void WaitPendingFences() { + auto& gpu{system.GPU()}; + auto& memory_manager{gpu.MemoryManager()}; + while (!fences.empty()) { + TFence& current_fence = fences.front(); + if (ShouldWait()) { + WaitFence(current_fence); + } + PopAsyncFlushes(); + if (current_fence->IsSemaphore()) { + memory_manager.Write<u32>(current_fence->GetAddress(), current_fence->GetPayload()); + } else { + gpu.IncrementSyncPoint(current_fence->GetPayload()); + } + fences.pop(); + } + } + +protected: + FenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + TTextureCache& texture_cache, TTBufferCache& buffer_cache, + TQueryCache& query_cache) + : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache}, + buffer_cache{buffer_cache}, query_cache{query_cache} {} + + virtual ~FenceManager() {} + + /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is + /// true + virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; + /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true + virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0; + /// Queues a fence into the backend if the fence isn't stubbed. + virtual void QueueFence(TFence& fence) = 0; + /// Notifies that the backend fence has been signaled/reached in host GPU. + virtual bool IsFenceSignaled(TFence& fence) const = 0; + /// Waits until a fence has been signalled by the host GPU. 
+ virtual void WaitFence(TFence& fence) = 0; + + Core::System& system; + VideoCore::RasterizerInterface& rasterizer; + TTextureCache& texture_cache; + TTBufferCache& buffer_cache; + TQueryCache& query_cache; + +private: + void TryReleasePendingFences() { + auto& gpu{system.GPU()}; + auto& memory_manager{gpu.MemoryManager()}; + while (!fences.empty()) { + TFence& current_fence = fences.front(); + if (ShouldWait() && !IsFenceSignaled(current_fence)) { + return; + } + PopAsyncFlushes(); + if (current_fence->IsSemaphore()) { + memory_manager.Write<u32>(current_fence->GetAddress(), current_fence->GetPayload()); + } else { + gpu.IncrementSyncPoint(current_fence->GetPayload()); + } + fences.pop(); + } + } + + bool ShouldWait() const { + return texture_cache.ShouldWaitAsyncFlushes() || buffer_cache.ShouldWaitAsyncFlushes() || + query_cache.ShouldWaitAsyncFlushes(); + } + + bool ShouldFlush() const { + return texture_cache.HasUncommittedFlushes() || buffer_cache.HasUncommittedFlushes() || + query_cache.HasUncommittedFlushes(); + } + + void PopAsyncFlushes() { + texture_cache.PopAsyncFlushes(); + buffer_cache.PopAsyncFlushes(); + query_cache.PopAsyncFlushes(); + } + + void CommitAsyncFlushes() { + texture_cache.CommitAsyncFlushes(); + buffer_cache.CommitAsyncFlushes(); + query_cache.CommitAsyncFlushes(); + } + + std::queue<TFence> fences; +}; + +} // namespace VideoCommon diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 8acf2eda2..3b7572d61 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -27,7 +27,7 @@ GPU::GPU(Core::System& system, std::unique_ptr<VideoCore::RendererBase>&& render : system{system}, renderer{std::move(renderer_)}, is_async{is_async} { auto& rasterizer{renderer->Rasterizer()}; memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer); - dma_pusher = std::make_unique<Tegra::DmaPusher>(*this); + dma_pusher = std::make_unique<Tegra::DmaPusher>(system, *this); maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager); fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer); kepler_compute = std::make_unique<Engines::KeplerCompute>(system, rasterizer, *memory_manager); @@ -125,6 +125,28 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) { return true; } +u64 GPU::RequestFlush(VAddr addr, std::size_t size) { + std::unique_lock lck{flush_request_mutex}; + const u64 fence = ++last_flush_fence; + flush_requests.emplace_back(fence, addr, size); + return fence; +} + +void GPU::TickWork() { + std::unique_lock lck{flush_request_mutex}; + while (!flush_requests.empty()) { + auto& request = flush_requests.front(); + const u64 fence = request.fence; + const VAddr addr = request.addr; + const std::size_t size = request.size; + flush_requests.pop_front(); + flush_request_mutex.unlock(); + renderer->Rasterizer().FlushRegion(addr, size); + current_flush_fence.store(fence); + flush_request_mutex.lock(); + } +} + u64 GPU::GetTicks() const { // These values were reverse engineered by fincs from NVN // The gpu clock is reported in units of 385/625 nanoseconds @@ -142,6 +164,13 @@ void GPU::FlushCommands() { renderer->Rasterizer().FlushCommands(); } +void GPU::SyncGuestHost() { + renderer->Rasterizer().SyncGuestHost(); +} + +void GPU::OnCommandListEnd() { + renderer->Rasterizer().ReleaseFences(); +} // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence // their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4.
// So the values you see in docs might be multiplied by 4. diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index 1a2d747be..5e3eb94e9 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -155,7 +155,23 @@ public: /// Calls a GPU method. void CallMethod(const MethodCall& method_call); + /// Flush all currently written commands to the host GPU for execution. void FlushCommands(); + /// Synchronizes CPU writes with host GPU memory. + void SyncGuestHost(); + /// Signal the end of a command list. + virtual void OnCommandListEnd(); + + /// Request a host GPU memory flush from the CPU. + u64 RequestFlush(VAddr addr, std::size_t size); + + /// Obtains the current flush request fence id. + u64 CurrentFlushRequestFence() const { + return current_flush_fence.load(std::memory_order_relaxed); + } + + /// Tick pending requests within the GPU. + void TickWork(); /// Returns a reference to the Maxwell3D GPU engine. Engines::Maxwell3D& Maxwell3D(); @@ -325,6 +341,19 @@ private: std::condition_variable sync_cv; + struct FlushRequest { + FlushRequest(u64 fence, VAddr addr, std::size_t size) + : fence{fence}, addr{addr}, size{size} {} + u64 fence; + VAddr addr; + std::size_t size; + }; + + std::list<FlushRequest> flush_requests; + std::atomic<u64> current_flush_fence{}; + u64 last_flush_fence{}; + std::mutex flush_request_mutex; + const bool is_async; }; diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp index 20e73a37e..53305ab43 100644 --- a/src/video_core/gpu_asynch.cpp +++ b/src/video_core/gpu_asynch.cpp @@ -52,4 +52,8 @@ void GPUAsynch::WaitIdle() const { gpu_thread.WaitIdle(); } +void GPUAsynch::OnCommandListEnd() { + gpu_thread.OnCommandListEnd(); +} + } // namespace VideoCommon diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h index 03fd0eef0..517658612 100644 --- a/src/video_core/gpu_asynch.h +++ b/src/video_core/gpu_asynch.h @@ -32,6 +32,8 @@ public: void FlushAndInvalidateRegion(VAddr addr, u64 size) override; void WaitIdle() const override; + void OnCommandListEnd() override; + protected: void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 10cda686b..c3bb4fe06 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -6,6 +6,7 @@ #include "common/microprofile.h" #include "core/core.h" #include "core/frontend/emu_window.h" +#include "core/settings.h" #include "video_core/dma_pusher.h" #include "video_core/gpu.h" #include "video_core/gpu_thread.h" @@ -14,8 +15,9 @@ namespace VideoCommon::GPUThread { /// Runs the GPU thread -static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, - Tegra::DmaPusher& dma_pusher, SynchState& state) { +static void RunThread(Core::System& system, VideoCore::RendererBase& renderer, + Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher, + SynchState& state) { MicroProfileOnThreadCreate("GpuThread"); // Wait for first GPU command before acquiring the window context @@ -37,10 +39,14 @@ static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::Graphic dma_pusher.DispatchCalls(); } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { renderer.SwapBuffers(data->framebuffer ?
&*data->framebuffer : nullptr); + } else if (const auto data = std::get_if<OnCommandListEndCommand>(&next.data)) { + renderer.Rasterizer().ReleaseFences(); + } else if (const auto data = std::get_if<GPUTickCommand>(&next.data)) { + system.GPU().TickWork(); } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { renderer.Rasterizer().FlushRegion(data->addr, data->size); } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { - renderer.Rasterizer().InvalidateRegion(data->addr, data->size); + renderer.Rasterizer().OnCPUWrite(data->addr, data->size); } else if (std::holds_alternative<EndProcessingCommand>(next.data)) { return; } else { @@ -65,8 +71,8 @@ ThreadManager::~ThreadManager() { void ThreadManager::StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher) { - thread = std::thread{RunThread, std::ref(renderer), std::ref(context), std::ref(dma_pusher), - std::ref(state)}; + thread = std::thread{RunThread, std::ref(system), std::ref(renderer), + std::ref(context), std::ref(dma_pusher), std::ref(state)}; } void ThreadManager::SubmitList(Tegra::CommandList&& entries) { @@ -78,16 +84,29 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { } void ThreadManager::FlushRegion(VAddr addr, u64 size) { - PushCommand(FlushRegionCommand(addr, size)); + if (!Settings::IsGPULevelHigh()) { + PushCommand(FlushRegionCommand(addr, size)); + return; + } + if (!Settings::IsGPULevelExtreme()) { + return; + } + if (system.Renderer().Rasterizer().MustFlushRegion(addr, size)) { + auto& gpu = system.GPU(); + u64 fence = gpu.RequestFlush(addr, size); + PushCommand(GPUTickCommand()); + while (fence > gpu.CurrentFlushRequestFence()) { + } + } } void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { - system.Renderer().Rasterizer().InvalidateRegion(addr, size); + system.Renderer().Rasterizer().OnCPUWrite(addr, size); } void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) { // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important - InvalidateRegion(addr, size); + system.Renderer().Rasterizer().OnCPUWrite(addr, size); } void ThreadManager::WaitIdle() const { @@ -95,6 +114,10 @@ void ThreadManager::WaitIdle() const { } } +void ThreadManager::OnCommandListEnd() { + PushCommand(OnCommandListEndCommand()); +} + u64 ThreadManager::PushCommand(CommandData&& command_data) { const u64 fence{++state.last_fence}; state.queue.Push(CommandDataContainer(std::move(command_data), fence)); diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index cd74ad330..5a28335d6 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -70,9 +70,16 @@ struct FlushAndInvalidateRegionCommand final { u64 size; }; +/// Command called within the gpu, to schedule actions after a command list end +struct OnCommandListEndCommand final {}; + +/// Command to make the gpu look into pending requests +struct GPUTickCommand final {}; + using CommandData = std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand, - InvalidateRegionCommand, FlushAndInvalidateRegionCommand>; + InvalidateRegionCommand, FlushAndInvalidateRegionCommand, OnCommandListEndCommand, + GPUTickCommand>; struct CommandDataContainer { CommandDataContainer() = default; @@ -122,6 +129,8 @@ public: // Wait until the gpu thread is idle. 
void WaitIdle() const; + void OnCommandListEnd(); + private: /// Pushes a command to be executed by the GPU thread u64 PushCommand(CommandData&& command_data); diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index a3389d0d2..fd49bc2a9 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -6,8 +6,8 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" @@ -17,10 +17,7 @@ namespace Tegra { MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer}, system{system} { - std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); - std::fill(page_table.attributes.begin(), page_table.attributes.end(), - Common::PageType::Unmapped); - page_table.Resize(address_space_width); + page_table.Resize(address_space_width, page_bits, false); // Initialize the map with a single free region covering the entire managed space. VirtualMemoryArea initial_vma; @@ -55,9 +52,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::DeviceMapped) + ->PageTable() + .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::DeviceShared) .IsSuccess()); return gpu_addr; @@ -70,9 +67,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::DeviceMapped) + ->PageTable() + .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::DeviceShared) .IsSuccess()); return gpu_addr; } @@ -89,9 +86,10 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { UnmapRange(gpu_addr, aligned_size); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::None) + ->PageTable() + .SetMemoryAttribute(cpu_addr.value(), size, + Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::None) .IsSuccess()); return gpu_addr; @@ -147,16 +145,8 @@ T MemoryManager::Read(GPUVAddr addr) const { return value; } - switch (page_table.attributes[addr >> page_bits]) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr); - return 0; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); - break; - default: - UNREACHABLE(); - } + UNREACHABLE(); + return {}; } @@ -173,17 +163,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) { return; } - switch (page_table.attributes[addr >> page_bits]) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, - static_cast<u32>(data), addr); - return; - case Common::PageType::Memory: - 
ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); - break; - default: - UNREACHABLE(); - } + UNREACHABLE(); } template u8 MemoryManager::Read<u8>(GPUVAddr addr) const; @@ -249,18 +229,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s const std::size_t copy_amount{ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - switch (page_table.attributes[page_index]) { - case Common::PageType::Memory: { - const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; - // Flush must happen on the rasterizer interface, such that memory is always synchronous - // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. - rasterizer.FlushRegion(src_addr, copy_amount); - memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); - break; - } - default: - UNREACHABLE(); - } + const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; + // Flush must happen on the rasterizer interface, such that memory is always synchronous + // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. + rasterizer.FlushRegion(src_addr, copy_amount); + memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); page_index++; page_offset = 0; @@ -305,18 +278,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const const std::size_t copy_amount{ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - switch (page_table.attributes[page_index]) { - case Common::PageType::Memory: { - const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; - // Invalidate must happen on the rasterizer interface, such that memory is always - // synchronous when it is written (even when in asynchronous GPU mode). - rasterizer.InvalidateRegion(dest_addr, copy_amount); - memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); - break; - } - default: - UNREACHABLE(); - } + const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; + // Invalidate must happen on the rasterizer interface, such that memory is always + // synchronous when it is written (even when in asynchronous GPU mode). 
+ rasterizer.InvalidateRegion(dest_addr, copy_amount); + memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); page_index++; page_offset = 0; @@ -362,8 +328,8 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) { const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits]; - const std::size_t page = (addr & Memory::PAGE_MASK) + size; - return page <= Memory::PAGE_SIZE; + const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size; + return page <= Core::Memory::PAGE_SIZE; } void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, @@ -375,12 +341,13 @@ void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageTy ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", base + page_table.pointers.size()); - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); - std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end, - backing_addr); + while (base != end) { + page_table.pointers[base] = nullptr; + page_table.backing_addr[base] = 0; + + base += 1; + } } else { while (base != end) { page_table.pointers[base] = memory; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 0d9468535..0ddd52d5a 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -179,7 +179,7 @@ private: /// End of address space, based on address space in bits. static constexpr GPUVAddr address_space_end{1ULL << address_space_width}; - Common::BackingPageTable page_table{page_bits}; + Common::PageTable page_table; VMAMap vma_map; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 5ea2b01f2..2f75f8801 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -12,10 +12,12 @@ #include <mutex> #include <optional> #include <unordered_map> +#include <unordered_set> #include <vector> #include "common/assert.h" #include "core/core.h" +#include "core/settings.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" @@ -130,6 +132,9 @@ public: } query->BindCounter(Stream(type).Current(), timestamp); + if (Settings::values.use_asynchronous_gpu_emulation) { + AsyncFlushQuery(cpu_addr); + } } /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. 
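// The buffer cache earlier in this patch and the query cache hunk below share
// one asynchronous-flush bookkeeping idiom: writes accumulate in an
// "uncommitted" set, each fence freezes that set into a FIFO of committed
// batches, and a batch is flushed once its fence is reached. A minimal
// self-contained sketch of the idiom follows; AsyncFlushTracker and its
// members are illustrative names, not the emulator's API.
#include <list>
#include <memory>
#include <unordered_set>

template <typename Key>
class AsyncFlushTracker {
public:
    // Record a write that must eventually be flushed back to guest memory.
    void MarkForFlush(const Key& key) {
        if (!uncommitted) {
            uncommitted = std::make_shared<std::unordered_set<Key>>();
        }
        uncommitted->insert(key);
    }

    // A fence was signaled: freeze the pending set as one batch. A null batch
    // is still queued so fences and batches stay in lockstep.
    void Commit() {
        committed.push_back(std::move(uncommitted));
        uncommitted.reset();
    }

    // The host reached the oldest fence: flush its batch, if any.
    template <typename FlushFn>
    void Pop(FlushFn&& flush) {
        if (committed.empty()) {
            return;
        }
        if (const auto batch = std::move(committed.front()); batch) {
            for (const Key& key : *batch) {
                flush(key);
            }
        }
        committed.pop_front();
    }

    bool HasUncommitted() const {
        return uncommitted != nullptr;
    }

    bool ShouldWait() const {
        return !committed.empty() && committed.front() != nullptr;
    }

private:
    std::shared_ptr<std::unordered_set<Key>> uncommitted;
    std::list<std::shared_ptr<std::unordered_set<Key>>> committed;
};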
@@ -170,6 +175,37 @@ public: return streams[static_cast<std::size_t>(type)]; } + void CommitAsyncFlushes() { + committed_flushes.push_back(uncommitted_flushes); + uncommitted_flushes.reset(); + } + + bool HasUncommittedFlushes() const { + return uncommitted_flushes != nullptr; + } + + bool ShouldWaitAsyncFlushes() const { + if (committed_flushes.empty()) { + return false; + } + return committed_flushes.front() != nullptr; + } + + void PopAsyncFlushes() { + if (committed_flushes.empty()) { + return; + } + auto& flush_list = committed_flushes.front(); + if (!flush_list) { + committed_flushes.pop_front(); + return; + } + for (VAddr query_address : *flush_list) { + FlushAndRemoveRegion(query_address, 4); + } + committed_flushes.pop_front(); + } + protected: std::array<QueryPool, VideoCore::NumQueryTypes> query_pools; @@ -224,6 +260,13 @@ private: return found != std::end(contents) ? &*found : nullptr; } + void AsyncFlushQuery(VAddr addr) { + if (!uncommitted_flushes) { + uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>(); + } + uncommitted_flushes->insert(addr); + } + static constexpr std::uintptr_t PAGE_SIZE = 4096; static constexpr unsigned PAGE_SHIFT = 12; @@ -235,6 +278,9 @@ private: std::unordered_map<u64, std::vector<CachedQuery>> cached_queries; std::array<CounterStream, VideoCore::NumQueryTypes> streams; + + std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{}; + std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes; }; template <class QueryCache, class HostCounter> diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index d01db97da..53622ca05 100644 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp @@ -23,15 +23,15 @@ constexpr auto RangeFromInterval(Map& map, const Interval& interval) { } // Anonymous namespace -RasterizerAccelerated::RasterizerAccelerated(Memory::Memory& cpu_memory_) +RasterizerAccelerated::RasterizerAccelerated(Core::Memory::Memory& cpu_memory_) : cpu_memory{cpu_memory_} {} RasterizerAccelerated::~RasterizerAccelerated() = default; void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { std::lock_guard lock{pages_mutex}; - const u64 page_start{addr >> Memory::PAGE_BITS}; - const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; + const u64 page_start{addr >> Core::Memory::PAGE_BITS}; + const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS}; // Interval maps will erase segments if count reaches 0, so if delta is negative we have to // subtract after iterating @@ -44,8 +44,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del const auto interval = pair.first & pages_interval; const int count = pair.second; - const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS; - const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS; + const VAddr interval_start_addr = boost::icl::first(interval) << Core::Memory::PAGE_BITS; + const VAddr interval_end_addr = boost::icl::last_next(interval) << Core::Memory::PAGE_BITS; const u64 interval_size = interval_end_addr - interval_start_addr; if (delta > 0 && count == delta) { diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h index 315798e7c..91866d7dd 100644 --- a/src/video_core/rasterizer_accelerated.h +++ b/src/video_core/rasterizer_accelerated.h @@ -11,7 +11,7 @@ #include 
"common/common_types.h" #include "video_core/rasterizer_interface.h" -namespace Memory { +namespace Core::Memory { class Memory; } @@ -20,7 +20,7 @@ namespace VideoCore { /// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface. class RasterizerAccelerated : public RasterizerInterface { public: - explicit RasterizerAccelerated(Memory::Memory& cpu_memory_); + explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_); ~RasterizerAccelerated() override; void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; @@ -30,7 +30,7 @@ private: CachedPageMap cached_pages; std::mutex pages_mutex; - Memory::Memory& cpu_memory; + Core::Memory::Memory& cpu_memory; }; } // namespace VideoCore diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 8ae5b9c4e..603f61952 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -49,15 +49,33 @@ public: /// Records a GPU query and caches it virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0; + /// Signal a GPU based semaphore as a fence + virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; + + /// Signal a GPU based syncpoint as a fence + virtual void SignalSyncPoint(u32 value) = 0; + + /// Release all pending fences. + virtual void ReleaseFences() = 0; + /// Notify rasterizer that all caches should be flushed to Switch memory virtual void FlushAll() = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory virtual void FlushRegion(VAddr addr, u64 size) = 0; + /// Check if the the specified memory area requires flushing to CPU Memory. + virtual bool MustFlushRegion(VAddr addr, u64 size) = 0; + /// Notify rasterizer that any caches of the specified region should be invalidated virtual void InvalidateRegion(VAddr addr, u64 size) = 0; + /// Notify rasterizer that any caches of the specified region are desync with guest + virtual void OnCPUWrite(VAddr addr, u64 size) = 0; + + /// Sync memory between guest and host. 
+ virtual void SyncGuestHost() = 0; + /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory /// and invalidated virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index cb5792407..4efce0de7 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -52,7 +52,7 @@ Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { } void OGLBufferCache::WriteBarrier() { - glMemoryBarrier(GL_ALL_BARRIER_BITS); + glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); } GLuint OGLBufferCache::ToHandle(const Buffer& buffer) { @@ -72,6 +72,7 @@ void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, s void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, u8* data) { MICROPROFILE_SCOPE(OpenGL_Buffer_Download); + glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT); glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size), data); } diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp new file mode 100644 index 000000000..99ddcb3f8 --- /dev/null +++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp @@ -0,0 +1,72 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/assert.h" + +#include "video_core/renderer_opengl/gl_fence_manager.h" + +namespace OpenGL { + +GLInnerFence::GLInnerFence(u32 payload, bool is_stubbed) + : VideoCommon::FenceBase(payload, is_stubbed), sync_object{} {} + +GLInnerFence::GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed) + : VideoCommon::FenceBase(address, payload, is_stubbed), sync_object{} {} + +GLInnerFence::~GLInnerFence() = default; + +void GLInnerFence::Queue() { + if (is_stubbed) { + return; + } + ASSERT(sync_object.handle == 0); + sync_object.Create(); +} + +bool GLInnerFence::IsSignaled() const { + if (is_stubbed) { + return true; + } + ASSERT(sync_object.handle != 0); + GLsizei length; + GLint sync_status; + glGetSynciv(sync_object.handle, GL_SYNC_STATUS, sizeof(GLint), &length, &sync_status); + return sync_status == GL_SIGNALED; +} + +void GLInnerFence::Wait() { + if (is_stubbed) { + return; + } + ASSERT(sync_object.handle != 0); + glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED); +} + +FenceManagerOpenGL::FenceManagerOpenGL(Core::System& system, + VideoCore::RasterizerInterface& rasterizer, + TextureCacheOpenGL& texture_cache, + OGLBufferCache& buffer_cache, QueryCache& query_cache) + : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache) {} + +Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { + return std::make_shared<GLInnerFence>(value, is_stubbed); +} + +Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { + return std::make_shared<GLInnerFence>(addr, value, is_stubbed); +} + +void FenceManagerOpenGL::QueueFence(Fence& fence) { + fence->Queue(); +} + +bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const { + return fence->IsSignaled(); +} + +void FenceManagerOpenGL::WaitFence(Fence& fence) { + fence->Wait(); +} + +} // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h 
new file mode 100644 index 000000000..c917b3343 --- /dev/null +++ b/src/video_core/renderer_opengl/gl_fence_manager.h @@ -0,0 +1,53 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <glad/glad.h> + +#include "common/common_types.h" +#include "video_core/fence_manager.h" +#include "video_core/renderer_opengl/gl_buffer_cache.h" +#include "video_core/renderer_opengl/gl_query_cache.h" +#include "video_core/renderer_opengl/gl_resource_manager.h" +#include "video_core/renderer_opengl/gl_texture_cache.h" + +namespace OpenGL { + +class GLInnerFence : public VideoCommon::FenceBase { +public: + GLInnerFence(u32 payload, bool is_stubbed); + GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed); + ~GLInnerFence(); + + void Queue(); + + bool IsSignaled() const; + + void Wait(); + +private: + OGLSync sync_object; +}; + +using Fence = std::shared_ptr<GLInnerFence>; +using GenericFenceManager = + VideoCommon::FenceManager<Fence, TextureCacheOpenGL, OGLBufferCache, QueryCache>; + +class FenceManagerOpenGL final : public GenericFenceManager { +public: + FenceManagerOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache, + QueryCache& query_cache); + +protected: + Fence CreateFence(u32 value, bool is_stubbed) override; + Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + void QueueFence(Fence& fence) override; + bool IsFenceSignaled(Fence& fence) const override; + void WaitFence(Fence& fence) override; +}; + +} // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index f12e9f55f..d7ba57aca 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -94,9 +94,9 @@ CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {} CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept { - VideoCommon::CachedQueryBase<HostCounter>::operator=(std::move(rhs)); cache = rhs.cache; type = rhs.type; + CachedQueryBase<HostCounter>::operator=(std::move(rhs)); return *this; } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 175374f0d..4c16c89d2 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -99,9 +99,10 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind ScreenInfo& info, GLShader::ProgramManager& program_manager, StateTracker& state_tracker) : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device, state_tracker}, - shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, system{system}, - screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker}, - buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} { + shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, + buffer_cache{*this, system, device, STREAM_BUFFER_SIZE}, + fence_manager{system, *this, texture_cache, buffer_cache, query_cache}, system{system}, + screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} { CheckExtensions(); } @@ -599,6 +600,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool 
is_instanced) { EndTransformFeedback(); ++num_queued_commands; + + system.GPU().TickWork(); } void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { @@ -649,6 +652,13 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) { query_cache.FlushRegion(addr, size); } +bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) { + if (!Settings::IsGPULevelHigh()) { + return buffer_cache.MustFlushRegion(addr, size); + } + return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size); +} + void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { MICROPROFILE_SCOPE(OpenGL_CacheManagement); if (addr == 0 || size == 0) { @@ -660,8 +670,52 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { query_cache.InvalidateRegion(addr, size); } +void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) { + MICROPROFILE_SCOPE(OpenGL_CacheManagement); + if (addr == 0 || size == 0) { + return; + } + texture_cache.OnCPUWrite(addr, size); + shader_cache.InvalidateRegion(addr, size); + buffer_cache.OnCPUWrite(addr, size); + query_cache.InvalidateRegion(addr, size); +} + +void RasterizerOpenGL::SyncGuestHost() { + MICROPROFILE_SCOPE(OpenGL_CacheManagement); + texture_cache.SyncGuestHost(); + buffer_cache.SyncGuestHost(); +} + +void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + auto& memory_manager{gpu.MemoryManager()}; + memory_manager.Write<u32>(addr, value); + return; + } + fence_manager.SignalSemaphore(addr, value); +} + +void RasterizerOpenGL::SignalSyncPoint(u32 value) { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + gpu.IncrementSyncPoint(value); + return; + } + fence_manager.SignalSyncPoint(value); +} + +void RasterizerOpenGL::ReleaseFences() { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + return; + } + fence_manager.WaitPendingFences(); +} + void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) { - if (Settings::values.use_accurate_gpu_emulation) { + if (Settings::IsGPULevelExtreme()) { FlushRegion(addr, size); } InvalidateRegion(addr, size); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index caea174d2..ebd2173eb 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -23,6 +23,7 @@ #include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/gl_buffer_cache.h" #include "video_core/renderer_opengl/gl_device.h" +#include "video_core/renderer_opengl/gl_fence_manager.h" #include "video_core/renderer_opengl/gl_framebuffer_cache.h" #include "video_core/renderer_opengl/gl_query_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" @@ -66,7 +67,13 @@ public: void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; void FlushAll() override; void FlushRegion(VAddr addr, u64 size) override; + bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; + void OnCPUWrite(VAddr addr, u64 size) override; + void SyncGuestHost() override; + void SignalSemaphore(GPUVAddr addr, u32 value) override; + void SignalSyncPoint(u32 value) override; + void ReleaseFences() override; void FlushAndInvalidateRegion(VAddr addr, u64 size) override; void FlushCommands() override; void TickFrame() override; @@ -222,6 +229,8 @@ private: SamplerCacheOpenGL sampler_cache; FramebufferCacheOpenGL framebuffer_cache; QueryCache query_cache; + 
OGLBufferCache buffer_cache; + FenceManagerOpenGL fence_manager; Core::System& system; ScreenInfo& screen_info; @@ -229,7 +238,6 @@ private: StateTracker& state_tracker; static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024; - OGLBufferCache buffer_cache; GLint vertex_binding = 0; diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 12c6dcfde..f63156b8d 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -34,8 +34,6 @@ namespace OpenGL { using Tegra::Engines::ShaderType; -using VideoCommon::Shader::CompileDepth; -using VideoCommon::Shader::CompilerSettings; using VideoCommon::Shader::ProgramCode; using VideoCommon::Shader::Registry; using VideoCommon::Shader::ShaderIR; @@ -45,7 +43,7 @@ namespace { constexpr u32 STAGE_MAIN_OFFSET = 10; constexpr u32 KERNEL_MAIN_OFFSET = 0; -constexpr CompilerSettings COMPILER_SETTINGS{CompileDepth::FullDecompile}; +constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{}; /// Gets the address for the specified shader stage program GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) { @@ -450,7 +448,7 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { // Look up shader in the cache based on address const auto cpu_addr{memory_manager.GpuToCpuAddress(address)}; - Shader shader{cpu_addr ? TryGet(*cpu_addr) : nullptr}; + Shader shader{cpu_addr ? TryGet(*cpu_addr) : null_shader}; if (shader) { return last_shaders[static_cast<std::size_t>(program)] = shader; } @@ -479,7 +477,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { const std::size_t size_in_bytes = code.size() * sizeof(u64); shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes); } - Register(shader); + + if (cpu_addr) { + Register(shader); + } else { + null_shader = shader; + } return last_shaders[static_cast<std::size_t>(program)] = shader; } @@ -488,7 +491,7 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { auto& memory_manager{system.GPU().MemoryManager()}; const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)}; - auto kernel = cpu_addr ? TryGet(*cpu_addr) : nullptr; + auto kernel = cpu_addr ? 
TryGet(*cpu_addr) : null_kernel; if (kernel) { return kernel; } @@ -509,7 +512,11 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes); } - Register(kernel); + if (cpu_addr) { + Register(kernel); + } else { + null_kernel = kernel; + } return kernel; } diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index c836df5bd..91690b470 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -125,6 +125,9 @@ private: ShaderDiskCacheOpenGL disk_cache; std::unordered_map<u64, PrecompiledShader> runtime_cache; + Shader null_shader{}; + Shader null_kernel{}; + std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; }; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 9495f48a2..0cd3ad7e1 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -484,7 +484,7 @@ private: code.AddLine("switch (jmp_to) {{"); for (const auto& pair : ir.GetBasicBlocks()) { - const auto [address, bb] = pair; + const auto& [address, bb] = pair; code.AddLine("case 0x{:X}U: {{", address); ++code.scope; @@ -1145,6 +1145,7 @@ private: return {"gl_FragCoord"s + GetSwizzle(element), Type::Float}; default: UNREACHABLE(); + return {"0", Type::Int}; } case Attribute::Index::FrontColor: return {"gl_Color"s + GetSwizzle(element), Type::Float}; @@ -1483,8 +1484,8 @@ private: dy += '('; for (std::size_t index = 0; index < components; ++index) { - const auto operand_x{derivates.at(index * 2)}; - const auto operand_y{derivates.at(index * 2 + 1)}; + const auto& operand_x{derivates.at(index * 2)}; + const auto& operand_y{derivates.at(index * 2 + 1)}; dx += Visit(operand_x).AsFloat(); dy += Visit(operand_y).AsFloat(); diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index 89f0e04ef..2c0c77c28 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -191,6 +191,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, case Tegra::Texture::TextureMipmapFilter::Linear: return GL_LINEAR_MIPMAP_LINEAR; } + break; } case Tegra::Texture::TextureFilter::Nearest: { switch (mip_filter_mode) { @@ -201,6 +202,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, case Tegra::Texture::TextureMipmapFilter::Linear: return GL_NEAREST_MIPMAP_LINEAR; } + break; } } LOG_ERROR(Render_OpenGL, "Unimplemented texture filter mode={}", static_cast<u32>(filter_mode)); diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp index 2bb376555..be1c31978 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp @@ -2,10 +2,12 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
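// The fixed_pipeline_state.cpp rewrite below drops the hand-written per-field
// Hash()/operator== pairs: state is packed into explicit bitfields, the whole
// struct is hashed byte-wise (Common::CityHash64 in this patch) and compared
// with std::memcmp. That is only sound for trivially copyable types whose
// padding bits are deterministic, which is why GetFixedPipelineState() clears
// the padding member. A minimal sketch of the same idea, with std::hash as a
// stand-in for CityHash64; PackedState is an illustrative type, not the
// project's:
#include <cstdint>
#include <cstring>
#include <functional>
#include <string_view>
#include <type_traits>

struct PackedState {
    std::uint32_t raw = 0;      // bitfield-packed enables, ops and comparison funcs
    std::uint32_t padding = 0;  // zeroed explicitly: stray bits would break hashing

    std::size_t Hash() const noexcept {
        // Hash the raw object representation, as CityHash64 does over `this`.
        return std::hash<std::string_view>{}(
            std::string_view(reinterpret_cast<const char*>(this), sizeof *this));
    }

    bool operator==(const PackedState& rhs) const noexcept {
        return std::memcmp(this, &rhs, sizeof *this) == 0;
    }
};
static_assert(std::is_trivially_copyable_v<PackedState>,
              "byte-wise hashing and comparison require trivial copyability");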
+#include <cstring> #include <tuple> #include <boost/functional/hash.hpp> +#include "common/cityhash.h" #include "common/common_types.h" #include "video_core/renderer_vulkan/fixed_pipeline_state.h" @@ -13,289 +15,352 @@ namespace Vulkan { namespace { -constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) { - const FixedPipelineState::StencilFace front_stencil( - regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass, - regs.stencil_front_func_func); - const FixedPipelineState::StencilFace back_stencil = - regs.stencil_two_side_enable - ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail, - regs.stencil_back_op_zpass, - regs.stencil_back_func_func) - : front_stencil; - return FixedPipelineState::DepthStencil( - regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1, - regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil); -} - -constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) { - return FixedPipelineState::InputAssembly( - regs.draw.topology, regs.primitive_restart.enabled, - regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f); -} - -constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState( - const Maxwell& regs, std::size_t render_target) { - const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target]; - const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0}; - - const FixedPipelineState::BlendingAttachment default_blending( - false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, - Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One, - Maxwell::Blend::Factor::Zero, components); - if (render_target >= regs.rt_control.count) { - return default_blending; - } +constexpr std::size_t POINT = 0; +constexpr std::size_t LINE = 1; +constexpr std::size_t POLYGON = 2; +constexpr std::array POLYGON_OFFSET_ENABLE_LUT = { + POINT, // Points + LINE, // Lines + LINE, // LineLoop + LINE, // LineStrip + POLYGON, // Triangles + POLYGON, // TriangleStrip + POLYGON, // TriangleFan + POLYGON, // Quads + POLYGON, // QuadStrip + POLYGON, // Polygon + LINE, // LinesAdjacency + LINE, // LineStripAdjacency + POLYGON, // TrianglesAdjacency + POLYGON, // TriangleStripAdjacency + POLYGON, // Patches +}; - if (!regs.independent_blend_enable) { - const auto& src = regs.blend; - if (!src.enable[render_target]) { - return default_blending; - } - return FixedPipelineState::BlendingAttachment( - true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, - src.factor_source_a, src.factor_dest_a, components); - } +} // Anonymous namespace - if (!regs.blend.enable[render_target]) { - return default_blending; +void FixedPipelineState::DepthStencil::Fill(const Maxwell& regs) noexcept { + raw = 0; + front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail)); + front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail)); + front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass)); + front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func)); + if (regs.stencil_two_side_enable) { + back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail)); + back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail)); + back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass)); + 
back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func)); + } else { + back.action_stencil_fail.Assign(front.action_stencil_fail); + back.action_depth_fail.Assign(front.action_depth_fail); + back.action_depth_pass.Assign(front.action_depth_pass); + back.test_func.Assign(front.test_func); } - const auto& src = regs.independent_blend[render_target]; - return FixedPipelineState::BlendingAttachment( - true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, - src.factor_source_a, src.factor_dest_a, components); + depth_test_enable.Assign(regs.depth_test_enable); + depth_write_enable.Assign(regs.depth_write_enabled); + depth_bounds_enable.Assign(regs.depth_bounds_enable); + stencil_enable.Assign(regs.stencil_enable); + depth_test_func.Assign(PackComparisonOp(regs.depth_test_func)); } -constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) { - return FixedPipelineState::ColorBlending( - {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a}, - regs.rt_control.count, - {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1), - GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3), - GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5), - GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)}); -} - -constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) { - return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim, - regs.tess_mode.spacing, regs.tess_mode.cw != 0); -} - -constexpr std::size_t Point = 0; -constexpr std::size_t Line = 1; -constexpr std::size_t Polygon = 2; -constexpr std::array PolygonOffsetEnableLUT = { - Point, // Points - Line, // Lines - Line, // LineLoop - Line, // LineStrip - Polygon, // Triangles - Polygon, // TriangleStrip - Polygon, // TriangleFan - Polygon, // Quads - Polygon, // QuadStrip - Polygon, // Polygon - Line, // LinesAdjacency - Line, // LineStripAdjacency - Polygon, // TrianglesAdjacency - Polygon, // TriangleStripAdjacency - Polygon, // Patches -}; - -constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) { +void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept { + const auto& clip = regs.view_volume_clip_control; const std::array enabled_lut = {regs.polygon_offset_point_enable, regs.polygon_offset_line_enable, regs.polygon_offset_fill_enable}; - const auto topology = static_cast<std::size_t>(regs.draw.topology.Value()); - const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]]; + const u32 topology_index = static_cast<u32>(regs.draw.topology.Value()); - const auto& clip = regs.view_volume_clip_control; - const bool depth_clamp_enabled = clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1; - - Maxwell::FrontFace front_face = regs.front_face; + u32 packed_front_face = PackFrontFace(regs.front_face); if (regs.screen_y_control.triangle_rast_flip != 0 && regs.viewport_transform[0].scale_y > 0.0f) { - if (front_face == Maxwell::FrontFace::CounterClockWise) - front_face = Maxwell::FrontFace::ClockWise; - else if (front_face == Maxwell::FrontFace::ClockWise) - front_face = Maxwell::FrontFace::CounterClockWise; + // Flip front face + packed_front_face = 1 - packed_front_face; } - const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; - return FixedPipelineState::Rasterizer(regs.cull_test_enabled, depth_bias_enabled, - depth_clamp_enabled, gl_ndc, 
regs.cull_face, front_face);
+    raw = 0;
+    topology.Assign(topology_index);
+    primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
+    cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
+    depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
+    depth_clamp_enable.Assign(clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1 ? 1 : 0);
+    ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
+    cull_face.Assign(PackCullFace(regs.cull_face));
+    front_face.Assign(packed_front_face);
+    polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
+    patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
+    tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value()));
+    tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value()));
+    tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
+    logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
+    logic_op.Assign(PackLogicOp(regs.logic_op.operation));
+    std::memcpy(&point_size, &regs.point_size, sizeof(point_size)); // TODO: C++20 std::bit_cast
 }
 
-} // Anonymous namespace
-
-std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept {
-    return (index << stride) ^ divisor;
+void FixedPipelineState::ColorBlending::Fill(const Maxwell& regs) noexcept {
+    for (std::size_t index = 0; index < std::size(attachments); ++index) {
+        attachments[index].Fill(regs, index);
+    }
 }
 
-bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept {
-    return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor);
-}
+void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size_t index) {
+    const auto& mask = regs.color_mask[regs.color_mask_common ?
0 : index]; + + raw = 0; + mask_r.Assign(mask.R); + mask_g.Assign(mask.G); + mask_b.Assign(mask.B); + mask_a.Assign(mask.A); + + // TODO: C++20 Use templated lambda to deduplicate code + + if (!regs.independent_blend_enable) { + const auto& src = regs.blend; + if (!src.enable[index]) { + return; + } + equation_rgb.Assign(PackBlendEquation(src.equation_rgb)); + equation_a.Assign(PackBlendEquation(src.equation_a)); + factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb)); + factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb)); + factor_source_a.Assign(PackBlendFactor(src.factor_source_a)); + factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a)); + enable.Assign(1); + return; + } -std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept { - return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^ - (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^ - (static_cast<std::size_t>(offset) << 36); + if (!regs.blend.enable[index]) { + return; + } + const auto& src = regs.independent_blend[index]; + equation_rgb.Assign(PackBlendEquation(src.equation_rgb)); + equation_a.Assign(PackBlendEquation(src.equation_a)); + factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb)); + factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb)); + factor_source_a.Assign(PackBlendFactor(src.factor_source_a)); + factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a)); + enable.Assign(1); } -bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept { - return std::tie(index, buffer, type, size, offset) == - std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset); +std::size_t FixedPipelineState::Hash() const noexcept { + const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this); + return static_cast<std::size_t>(hash); } -std::size_t FixedPipelineState::StencilFace::Hash() const noexcept { - return static_cast<std::size_t>(action_stencil_fail) ^ - (static_cast<std::size_t>(action_depth_fail) << 4) ^ - (static_cast<std::size_t>(action_depth_fail) << 20) ^ - (static_cast<std::size_t>(action_depth_pass) << 36); +bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept { + return std::memcmp(this, &rhs, sizeof *this) == 0; } -bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept { - return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) == - std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass, - rhs.test_func); +FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { + FixedPipelineState fixed_state; + fixed_state.rasterizer.Fill(regs); + fixed_state.depth_stencil.Fill(regs); + fixed_state.color_blending.Fill(regs); + fixed_state.padding = {}; + return fixed_state; } -std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept { - return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^ - (static_cast<std::size_t>(src_rgb_func) << 10) ^ - (static_cast<std::size_t>(dst_rgb_func) << 15) ^ - (static_cast<std::size_t>(a_equation) << 20) ^ - (static_cast<std::size_t>(src_a_func) << 25) ^ - (static_cast<std::size_t>(dst_a_func) << 30) ^ - (static_cast<std::size_t>(components[0]) << 35) ^ - (static_cast<std::size_t>(components[1]) << 36) ^ - (static_cast<std::size_t>(components[2]) << 37) ^ - (static_cast<std::size_t>(components[3]) << 38); +u32 
FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
+    // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8.
+    // If we subtract 0x200 from the OpenGL enums and 1 from the others, we get a 0-7 range.
+    // Perfect for a hash.
+    const u32 value = static_cast<u32>(op);
+    return value - (value >= 0x200 ? 0x200 : 1);
 }
 
-bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const
-    noexcept {
-    return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func,
-                    dst_a_func, components) ==
-           std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func,
-                    rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components);
+Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcept {
+    // Read PackComparisonOp for the logic behind this.
+    return static_cast<Maxwell::ComparisonOp>(packed + 1);
 }
 
-std::size_t FixedPipelineState::VertexInput::Hash() const noexcept {
-    std::size_t hash = num_bindings ^ (num_attributes << 32);
-    for (std::size_t i = 0; i < num_bindings; ++i) {
-        boost::hash_combine(hash, bindings[i].Hash());
-    }
-    for (std::size_t i = 0; i < num_attributes; ++i) {
-        boost::hash_combine(hash, attributes[i].Hash());
+u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept {
+    switch (op) {
+    case Maxwell::StencilOp::Keep:
+    case Maxwell::StencilOp::KeepOGL:
+        return 0;
+    case Maxwell::StencilOp::Zero:
+    case Maxwell::StencilOp::ZeroOGL:
+        return 1;
+    case Maxwell::StencilOp::Replace:
+    case Maxwell::StencilOp::ReplaceOGL:
+        return 2;
+    case Maxwell::StencilOp::Incr:
+    case Maxwell::StencilOp::IncrOGL:
+        return 3;
+    case Maxwell::StencilOp::Decr:
+    case Maxwell::StencilOp::DecrOGL:
+        return 4;
+    case Maxwell::StencilOp::Invert:
+    case Maxwell::StencilOp::InvertOGL:
+        return 5;
+    case Maxwell::StencilOp::IncrWrap:
+    case Maxwell::StencilOp::IncrWrapOGL:
+        return 6;
+    case Maxwell::StencilOp::DecrWrap:
+    case Maxwell::StencilOp::DecrWrapOGL:
+        return 7;
     }
-    return hash;
+    return 0;
 }
 
-bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept {
-    return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(),
-                      rhs.bindings.begin() + rhs.num_bindings) &&
-           std::equal(attributes.begin(), attributes.begin() + num_attributes,
-                      rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes);
+Maxwell::StencilOp FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
+    static constexpr std::array LUT = {Maxwell::StencilOp::Keep,     Maxwell::StencilOp::Zero,
+                                       Maxwell::StencilOp::Replace,  Maxwell::StencilOp::Incr,
+                                       Maxwell::StencilOp::Decr,     Maxwell::StencilOp::Invert,
+                                       Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap};
+    return LUT[packed];
 }
 
-std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept {
-    std::size_t point_size_int = 0;
-    std::memcpy(&point_size_int, &point_size, sizeof(point_size));
-    return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^
-           static_cast<std::size_t>(primitive_restart_enable);
+u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
+    // FrontAndBack is 0x408; subtracting 0x406 from it gives 2.
+    // The individual cull faces are 0x404 and 0x405; subtracting 0x404 gives 0 and 1.
+    const u32 value = static_cast<u32>(cull);
+    return value - (value == 0x408 ?
0x406 : 0x404); } -bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept { - return std::tie(topology, primitive_restart_enable, point_size) == - std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size); +Maxwell::CullFace FixedPipelineState::UnpackCullFace(u32 packed) noexcept { + static constexpr std::array LUT = {Maxwell::CullFace::Front, Maxwell::CullFace::Back, + Maxwell::CullFace::FrontAndBack}; + return LUT[packed]; } -std::size_t FixedPipelineState::Tessellation::Hash() const noexcept { - return static_cast<std::size_t>(patch_control_points) ^ - (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^ - (static_cast<std::size_t>(clockwise) << 10); +u32 FixedPipelineState::PackFrontFace(Maxwell::FrontFace face) noexcept { + return static_cast<u32>(face) - 0x900; } -bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept { - return std::tie(patch_control_points, primitive, spacing, clockwise) == - std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise); +Maxwell::FrontFace FixedPipelineState::UnpackFrontFace(u32 packed) noexcept { + return static_cast<Maxwell::FrontFace>(packed + 0x900); } -std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept { - return static_cast<std::size_t>(cull_enable) ^ - (static_cast<std::size_t>(depth_bias_enable) << 1) ^ - (static_cast<std::size_t>(depth_clamp_enable) << 2) ^ - (static_cast<std::size_t>(ndc_minus_one_to_one) << 3) ^ - (static_cast<std::size_t>(cull_face) << 24) ^ - (static_cast<std::size_t>(front_face) << 48); +u32 FixedPipelineState::PackPolygonMode(Maxwell::PolygonMode mode) noexcept { + return static_cast<u32>(mode) - 0x1B00; } -bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept { - return std::tie(cull_enable, depth_bias_enable, depth_clamp_enable, ndc_minus_one_to_one, - cull_face, front_face) == - std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.depth_clamp_enable, - rhs.ndc_minus_one_to_one, rhs.cull_face, rhs.front_face); +Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept { + return static_cast<Maxwell::PolygonMode>(packed + 0x1B00); } -std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept { - std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^ - (static_cast<std::size_t>(depth_write_enable) << 1) ^ - (static_cast<std::size_t>(depth_bounds_enable) << 2) ^ - (static_cast<std::size_t>(stencil_enable) << 3) ^ - (static_cast<std::size_t>(depth_test_function) << 4); - boost::hash_combine(hash, front_stencil.Hash()); - boost::hash_combine(hash, back_stencil.Hash()); - return hash; +u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept { + return static_cast<u32>(op) - 0x1500; } -bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept { - return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function, - stencil_enable, front_stencil, back_stencil) == - std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable, - rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil, - rhs.back_stencil); +Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept { + return static_cast<Maxwell::LogicOperation>(packed + 0x1500); } -std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept { - std::size_t hash = attachments_count << 13; - for (std::size_t rt = 0; rt < 
static_cast<std::size_t>(attachments_count); ++rt) { - boost::hash_combine(hash, attachments[rt].Hash()); +u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept { + switch (equation) { + case Maxwell::Blend::Equation::Add: + case Maxwell::Blend::Equation::AddGL: + return 0; + case Maxwell::Blend::Equation::Subtract: + case Maxwell::Blend::Equation::SubtractGL: + return 1; + case Maxwell::Blend::Equation::ReverseSubtract: + case Maxwell::Blend::Equation::ReverseSubtractGL: + return 2; + case Maxwell::Blend::Equation::Min: + case Maxwell::Blend::Equation::MinGL: + return 3; + case Maxwell::Blend::Equation::Max: + case Maxwell::Blend::Equation::MaxGL: + return 4; } - return hash; + return 0; } -bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept { - return std::equal(attachments.begin(), attachments.begin() + attachments_count, - rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count); -} - -std::size_t FixedPipelineState::Hash() const noexcept { - std::size_t hash = 0; - boost::hash_combine(hash, vertex_input.Hash()); - boost::hash_combine(hash, input_assembly.Hash()); - boost::hash_combine(hash, tessellation.Hash()); - boost::hash_combine(hash, rasterizer.Hash()); - boost::hash_combine(hash, depth_stencil.Hash()); - boost::hash_combine(hash, color_blending.Hash()); - return hash; +Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept { + static constexpr std::array LUT = { + Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract, + Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min, + Maxwell::Blend::Equation::Max}; + return LUT[packed]; } -bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept { - return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil, - color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly, - rhs.tessellation, rhs.rasterizer, rhs.depth_stencil, - rhs.color_blending); +u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept { + switch (factor) { + case Maxwell::Blend::Factor::Zero: + case Maxwell::Blend::Factor::ZeroGL: + return 0; + case Maxwell::Blend::Factor::One: + case Maxwell::Blend::Factor::OneGL: + return 1; + case Maxwell::Blend::Factor::SourceColor: + case Maxwell::Blend::Factor::SourceColorGL: + return 2; + case Maxwell::Blend::Factor::OneMinusSourceColor: + case Maxwell::Blend::Factor::OneMinusSourceColorGL: + return 3; + case Maxwell::Blend::Factor::SourceAlpha: + case Maxwell::Blend::Factor::SourceAlphaGL: + return 4; + case Maxwell::Blend::Factor::OneMinusSourceAlpha: + case Maxwell::Blend::Factor::OneMinusSourceAlphaGL: + return 5; + case Maxwell::Blend::Factor::DestAlpha: + case Maxwell::Blend::Factor::DestAlphaGL: + return 6; + case Maxwell::Blend::Factor::OneMinusDestAlpha: + case Maxwell::Blend::Factor::OneMinusDestAlphaGL: + return 7; + case Maxwell::Blend::Factor::DestColor: + case Maxwell::Blend::Factor::DestColorGL: + return 8; + case Maxwell::Blend::Factor::OneMinusDestColor: + case Maxwell::Blend::Factor::OneMinusDestColorGL: + return 9; + case Maxwell::Blend::Factor::SourceAlphaSaturate: + case Maxwell::Blend::Factor::SourceAlphaSaturateGL: + return 10; + case Maxwell::Blend::Factor::Source1Color: + case Maxwell::Blend::Factor::Source1ColorGL: + return 11; + case Maxwell::Blend::Factor::OneMinusSource1Color: + case Maxwell::Blend::Factor::OneMinusSource1ColorGL: + return 12; + case 
Maxwell::Blend::Factor::Source1Alpha: + case Maxwell::Blend::Factor::Source1AlphaGL: + return 13; + case Maxwell::Blend::Factor::OneMinusSource1Alpha: + case Maxwell::Blend::Factor::OneMinusSource1AlphaGL: + return 14; + case Maxwell::Blend::Factor::ConstantColor: + case Maxwell::Blend::Factor::ConstantColorGL: + return 15; + case Maxwell::Blend::Factor::OneMinusConstantColor: + case Maxwell::Blend::Factor::OneMinusConstantColorGL: + return 16; + case Maxwell::Blend::Factor::ConstantAlpha: + case Maxwell::Blend::Factor::ConstantAlphaGL: + return 17; + case Maxwell::Blend::Factor::OneMinusConstantAlpha: + case Maxwell::Blend::Factor::OneMinusConstantAlphaGL: + return 18; + } + return 0; } -FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { - FixedPipelineState fixed_state; - fixed_state.input_assembly = GetInputAssemblyState(regs); - fixed_state.tessellation = GetTessellationState(regs); - fixed_state.rasterizer = GetRasterizerState(regs); - fixed_state.depth_stencil = GetDepthStencilState(regs); - fixed_state.color_blending = GetColorBlendingState(regs); - return fixed_state; +Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept { + static constexpr std::array LUT = { + Maxwell::Blend::Factor::Zero, + Maxwell::Blend::Factor::One, + Maxwell::Blend::Factor::SourceColor, + Maxwell::Blend::Factor::OneMinusSourceColor, + Maxwell::Blend::Factor::SourceAlpha, + Maxwell::Blend::Factor::OneMinusSourceAlpha, + Maxwell::Blend::Factor::DestAlpha, + Maxwell::Blend::Factor::OneMinusDestAlpha, + Maxwell::Blend::Factor::DestColor, + Maxwell::Blend::Factor::OneMinusDestColor, + Maxwell::Blend::Factor::SourceAlphaSaturate, + Maxwell::Blend::Factor::Source1Color, + Maxwell::Blend::Factor::OneMinusSource1Color, + Maxwell::Blend::Factor::Source1Alpha, + Maxwell::Blend::Factor::OneMinusSource1Alpha, + Maxwell::Blend::Factor::ConstantColor, + Maxwell::Blend::Factor::OneMinusConstantColor, + Maxwell::Blend::Factor::ConstantAlpha, + Maxwell::Blend::Factor::OneMinusConstantAlpha, + }; + return LUT[packed]; } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h index 4c8ba7f90..9fe6bdbf9 100644 --- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h +++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h @@ -7,6 +7,7 @@ #include <array> #include <type_traits> +#include "common/bit_field.h" #include "common/common_types.h" #include "video_core/engines/maxwell_3d.h" @@ -16,93 +17,48 @@ namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; -// TODO(Rodrigo): Optimize this structure. 
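+// The struct below packs all fixed state into bit fields with dense enum
+// encodings so it has unique object representations and can be hashed and
+// memcmp-compared as raw memory. A minimal sketch of the packing idea (the
+// enum and its values here are illustrative placeholders, not real Maxwell
+// registers):
+//
+//   enum class HwOp : u32 { Never = 0x200, Less = 0x201 }; // sparse GL-style values
+//   constexpr u32 Pack(HwOp op) { return static_cast<u32>(op) - 0x200; } // dense 0..n
+//   constexpr HwOp Unpack(u32 v) { return static_cast<HwOp>(v + 0x200); }
+//   static_assert(Unpack(Pack(HwOp::Less)) == HwOp::Less); // lossless round trip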
+struct alignas(32) FixedPipelineState { + static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept; + static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept; -struct FixedPipelineState { - using PixelFormat = VideoCore::Surface::PixelFormat; + static u32 PackStencilOp(Maxwell::StencilOp op) noexcept; + static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept; - struct VertexBinding { - constexpr VertexBinding(u32 index, u32 stride, u32 divisor) - : index{index}, stride{stride}, divisor{divisor} {} - VertexBinding() = default; + static u32 PackCullFace(Maxwell::CullFace cull) noexcept; + static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept; - u32 index; - u32 stride; - u32 divisor; + static u32 PackFrontFace(Maxwell::FrontFace face) noexcept; + static Maxwell::FrontFace UnpackFrontFace(u32 packed) noexcept; - std::size_t Hash() const noexcept; - - bool operator==(const VertexBinding& rhs) const noexcept; - - bool operator!=(const VertexBinding& rhs) const noexcept { - return !operator==(rhs); - } - }; - - struct VertexAttribute { - constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type, - Maxwell::VertexAttribute::Size size, u32 offset) - : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {} - VertexAttribute() = default; - - u32 index; - u32 buffer; - Maxwell::VertexAttribute::Type type; - Maxwell::VertexAttribute::Size size; - u32 offset; - - std::size_t Hash() const noexcept; - - bool operator==(const VertexAttribute& rhs) const noexcept; - - bool operator!=(const VertexAttribute& rhs) const noexcept { - return !operator==(rhs); - } - }; - - struct StencilFace { - constexpr StencilFace(Maxwell::StencilOp action_stencil_fail, - Maxwell::StencilOp action_depth_fail, - Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func) - : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail}, - action_depth_pass{action_depth_pass}, test_func{test_func} {} - StencilFace() = default; - - Maxwell::StencilOp action_stencil_fail; - Maxwell::StencilOp action_depth_fail; - Maxwell::StencilOp action_depth_pass; - Maxwell::ComparisonOp test_func; + static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept; + static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept; - std::size_t Hash() const noexcept; + static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept; + static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept; - bool operator==(const StencilFace& rhs) const noexcept; + static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept; + static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept; - bool operator!=(const StencilFace& rhs) const noexcept { - return !operator==(rhs); - } - }; + static u32 PackBlendFactor(Maxwell::Blend::Factor factor) noexcept; + static Maxwell::Blend::Factor UnpackBlendFactor(u32 packed) noexcept; struct BlendingAttachment { - constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation, - Maxwell::Blend::Factor src_rgb_func, - Maxwell::Blend::Factor dst_rgb_func, - Maxwell::Blend::Equation a_equation, - Maxwell::Blend::Factor src_a_func, - Maxwell::Blend::Factor dst_a_func, - std::array<bool, 4> components) - : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func}, - dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func}, - dst_a_func{dst_a_func}, components{components} {} - BlendingAttachment() = default; - - bool enable; - 
Maxwell::Blend::Equation rgb_equation; - Maxwell::Blend::Factor src_rgb_func; - Maxwell::Blend::Factor dst_rgb_func; - Maxwell::Blend::Equation a_equation; - Maxwell::Blend::Factor src_a_func; - Maxwell::Blend::Factor dst_a_func; - std::array<bool, 4> components; + union { + u32 raw; + BitField<0, 1, u32> mask_r; + BitField<1, 1, u32> mask_g; + BitField<2, 1, u32> mask_b; + BitField<3, 1, u32> mask_a; + BitField<4, 3, u32> equation_rgb; + BitField<7, 3, u32> equation_a; + BitField<10, 5, u32> factor_source_rgb; + BitField<15, 5, u32> factor_dest_rgb; + BitField<20, 5, u32> factor_source_a; + BitField<25, 5, u32> factor_dest_a; + BitField<30, 1, u32> enable; + }; + + void Fill(const Maxwell& regs, std::size_t index); std::size_t Hash() const noexcept; @@ -111,136 +67,178 @@ struct FixedPipelineState { bool operator!=(const BlendingAttachment& rhs) const noexcept { return !operator==(rhs); } - }; - - struct VertexInput { - std::size_t num_bindings = 0; - std::size_t num_attributes = 0; - std::array<VertexBinding, Maxwell::NumVertexArrays> bindings; - std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes; - - std::size_t Hash() const noexcept; - bool operator==(const VertexInput& rhs) const noexcept; + constexpr std::array<bool, 4> Mask() const noexcept { + return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0}; + } - bool operator!=(const VertexInput& rhs) const noexcept { - return !operator==(rhs); + Maxwell::Blend::Equation EquationRGB() const noexcept { + return UnpackBlendEquation(equation_rgb.Value()); } - }; - struct InputAssembly { - constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable, - float point_size) - : topology{topology}, primitive_restart_enable{primitive_restart_enable}, - point_size{point_size} {} - InputAssembly() = default; + Maxwell::Blend::Equation EquationAlpha() const noexcept { + return UnpackBlendEquation(equation_a.Value()); + } - Maxwell::PrimitiveTopology topology; - bool primitive_restart_enable; - float point_size; + Maxwell::Blend::Factor SourceRGBFactor() const noexcept { + return UnpackBlendFactor(factor_source_rgb.Value()); + } - std::size_t Hash() const noexcept; + Maxwell::Blend::Factor DestRGBFactor() const noexcept { + return UnpackBlendFactor(factor_dest_rgb.Value()); + } - bool operator==(const InputAssembly& rhs) const noexcept; + Maxwell::Blend::Factor SourceAlphaFactor() const noexcept { + return UnpackBlendFactor(factor_source_a.Value()); + } - bool operator!=(const InputAssembly& rhs) const noexcept { - return !operator==(rhs); + Maxwell::Blend::Factor DestAlphaFactor() const noexcept { + return UnpackBlendFactor(factor_dest_a.Value()); } }; - struct Tessellation { - constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive, - Maxwell::TessellationSpacing spacing, bool clockwise) - : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing}, - clockwise{clockwise} {} - Tessellation() = default; - - u32 patch_control_points; - Maxwell::TessellationPrimitive primitive; - Maxwell::TessellationSpacing spacing; - bool clockwise; - - std::size_t Hash() const noexcept; - - bool operator==(const Tessellation& rhs) const noexcept; + struct VertexInput { + union Binding { + u16 raw; + BitField<0, 1, u16> enabled; + BitField<1, 12, u16> stride; + }; + + union Attribute { + u32 raw; + BitField<0, 1, u32> enabled; + BitField<1, 5, u32> buffer; + BitField<6, 14, u32> offset; + BitField<20, 3, u32> type; + BitField<23, 6, u32> size; + + constexpr 
Maxwell::VertexAttribute::Type Type() const noexcept { + return static_cast<Maxwell::VertexAttribute::Type>(type.Value()); + } + + constexpr Maxwell::VertexAttribute::Size Size() const noexcept { + return static_cast<Maxwell::VertexAttribute::Size>(size.Value()); + } + }; + + std::array<Binding, Maxwell::NumVertexArrays> bindings; + std::array<u32, Maxwell::NumVertexArrays> binding_divisors; + std::array<Attribute, Maxwell::NumVertexAttributes> attributes; + + void SetBinding(std::size_t index, bool enabled, u32 stride, u32 divisor) noexcept { + auto& binding = bindings[index]; + binding.raw = 0; + binding.enabled.Assign(enabled ? 1 : 0); + binding.stride.Assign(stride); + binding_divisors[index] = divisor; + } - bool operator!=(const Tessellation& rhs) const noexcept { - return !operator==(rhs); + void SetAttribute(std::size_t index, bool enabled, u32 buffer, u32 offset, + Maxwell::VertexAttribute::Type type, + Maxwell::VertexAttribute::Size size) noexcept { + auto& attribute = attributes[index]; + attribute.raw = 0; + attribute.enabled.Assign(enabled ? 1 : 0); + attribute.buffer.Assign(buffer); + attribute.offset.Assign(offset); + attribute.type.Assign(static_cast<u32>(type)); + attribute.size.Assign(static_cast<u32>(size)); } }; struct Rasterizer { - constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool depth_clamp_enable, - bool ndc_minus_one_to_one, Maxwell::CullFace cull_face, - Maxwell::FrontFace front_face) - : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable}, - depth_clamp_enable{depth_clamp_enable}, ndc_minus_one_to_one{ndc_minus_one_to_one}, - cull_face{cull_face}, front_face{front_face} {} - Rasterizer() = default; - - bool cull_enable; - bool depth_bias_enable; - bool depth_clamp_enable; - bool ndc_minus_one_to_one; - Maxwell::CullFace cull_face; - Maxwell::FrontFace front_face; - - std::size_t Hash() const noexcept; + union { + u32 raw; + BitField<0, 4, u32> topology; + BitField<4, 1, u32> primitive_restart_enable; + BitField<5, 1, u32> cull_enable; + BitField<6, 1, u32> depth_bias_enable; + BitField<7, 1, u32> depth_clamp_enable; + BitField<8, 1, u32> ndc_minus_one_to_one; + BitField<9, 2, u32> cull_face; + BitField<11, 1, u32> front_face; + BitField<12, 2, u32> polygon_mode; + BitField<14, 5, u32> patch_control_points_minus_one; + BitField<19, 2, u32> tessellation_primitive; + BitField<21, 2, u32> tessellation_spacing; + BitField<23, 1, u32> tessellation_clockwise; + BitField<24, 1, u32> logic_op_enable; + BitField<25, 4, u32> logic_op; + }; + + // TODO(Rodrigo): Move this to push constants + u32 point_size; + + void Fill(const Maxwell& regs) noexcept; + + constexpr Maxwell::PrimitiveTopology Topology() const noexcept { + return static_cast<Maxwell::PrimitiveTopology>(topology.Value()); + } - bool operator==(const Rasterizer& rhs) const noexcept; + Maxwell::CullFace CullFace() const noexcept { + return UnpackCullFace(cull_face.Value()); + } - bool operator!=(const Rasterizer& rhs) const noexcept { - return !operator==(rhs); + Maxwell::FrontFace FrontFace() const noexcept { + return UnpackFrontFace(front_face.Value()); } }; struct DepthStencil { - constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable, - bool depth_bounds_enable, bool stencil_enable, - Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil, - StencilFace back_stencil) - : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable}, - depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable}, - 
depth_test_function{depth_test_function}, front_stencil{front_stencil}, - back_stencil{back_stencil} {} - DepthStencil() = default; - - bool depth_test_enable; - bool depth_write_enable; - bool depth_bounds_enable; - bool stencil_enable; - Maxwell::ComparisonOp depth_test_function; - StencilFace front_stencil; - StencilFace back_stencil; - - std::size_t Hash() const noexcept; - - bool operator==(const DepthStencil& rhs) const noexcept; - - bool operator!=(const DepthStencil& rhs) const noexcept { - return !operator==(rhs); + template <std::size_t Position> + union StencilFace { + BitField<Position + 0, 3, u32> action_stencil_fail; + BitField<Position + 3, 3, u32> action_depth_fail; + BitField<Position + 6, 3, u32> action_depth_pass; + BitField<Position + 9, 3, u32> test_func; + + Maxwell::StencilOp ActionStencilFail() const noexcept { + return UnpackStencilOp(action_stencil_fail); + } + + Maxwell::StencilOp ActionDepthFail() const noexcept { + return UnpackStencilOp(action_depth_fail); + } + + Maxwell::StencilOp ActionDepthPass() const noexcept { + return UnpackStencilOp(action_depth_pass); + } + + Maxwell::ComparisonOp TestFunc() const noexcept { + return UnpackComparisonOp(test_func); + } + }; + + union { + u32 raw; + StencilFace<0> front; + StencilFace<12> back; + BitField<24, 1, u32> depth_test_enable; + BitField<25, 1, u32> depth_write_enable; + BitField<26, 1, u32> depth_bounds_enable; + BitField<27, 1, u32> stencil_enable; + BitField<28, 3, u32> depth_test_func; + }; + + void Fill(const Maxwell& regs) noexcept; + + Maxwell::ComparisonOp DepthTestFunc() const noexcept { + return UnpackComparisonOp(depth_test_func); } }; struct ColorBlending { - constexpr ColorBlending( - std::array<float, 4> blend_constants, std::size_t attachments_count, - std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments) - : attachments_count{attachments_count}, attachments{attachments} {} - ColorBlending() = default; - - std::size_t attachments_count; std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments; - std::size_t Hash() const noexcept; - - bool operator==(const ColorBlending& rhs) const noexcept; - - bool operator!=(const ColorBlending& rhs) const noexcept { - return !operator==(rhs); - } + void Fill(const Maxwell& regs) noexcept; }; + VertexInput vertex_input; + Rasterizer rasterizer; + DepthStencil depth_stencil; + ColorBlending color_blending; + std::array<u8, 20> padding; + std::size_t Hash() const noexcept; bool operator==(const FixedPipelineState& rhs) const noexcept; @@ -248,25 +246,11 @@ struct FixedPipelineState { bool operator!=(const FixedPipelineState& rhs) const noexcept { return !operator==(rhs); } - - VertexInput vertex_input; - InputAssembly input_assembly; - Tessellation tessellation; - Rasterizer rasterizer; - DepthStencil depth_stencil; - ColorBlending color_blending; }; -static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>); 
-static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>); -static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>); +static_assert(std::has_unique_object_representations_v<FixedPipelineState>); static_assert(std::is_trivially_copyable_v<FixedPipelineState>); +static_assert(std::is_trivially_constructible_v<FixedPipelineState>); +static_assert(sizeof(FixedPipelineState) % 32 == 0, "Size is not aligned"); FixedPipelineState GetFixedPipelineState(const Maxwell& regs); diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp new file mode 100644 index 000000000..435c8c1b8 --- /dev/null +++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp @@ -0,0 +1,220 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#ifdef HAS_NSIGHT_AFTERMATH + +#include <mutex> +#include <string> +#include <string_view> +#include <utility> +#include <vector> + +#include <fmt/format.h> + +#define VK_NO_PROTOTYPES +#include <vulkan/vulkan.h> + +#include <GFSDK_Aftermath.h> +#include <GFSDK_Aftermath_Defines.h> +#include <GFSDK_Aftermath_GpuCrashDump.h> +#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h> + +#include "common/common_paths.h" +#include "common/common_types.h" +#include "common/file_util.h" +#include "common/logging/log.h" +#include "common/scope_exit.h" + +#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h" + +namespace Vulkan { + +static constexpr char AFTERMATH_LIB_NAME[] = "GFSDK_Aftermath_Lib.x64.dll"; + +NsightAftermathTracker::NsightAftermathTracker() = default; + +NsightAftermathTracker::~NsightAftermathTracker() { + if (initialized) { + (void)GFSDK_Aftermath_DisableGpuCrashDumps(); + } +} + +bool NsightAftermathTracker::Initialize() { + if (!dl.Open(AFTERMATH_LIB_NAME)) { + LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath DLL"); + return false; + } + + if (!dl.GetSymbol("GFSDK_Aftermath_DisableGpuCrashDumps", + &GFSDK_Aftermath_DisableGpuCrashDumps) || + !dl.GetSymbol("GFSDK_Aftermath_EnableGpuCrashDumps", + &GFSDK_Aftermath_EnableGpuCrashDumps) || + !dl.GetSymbol("GFSDK_Aftermath_GetShaderDebugInfoIdentifier", + &GFSDK_Aftermath_GetShaderDebugInfoIdentifier) || + !dl.GetSymbol("GFSDK_Aftermath_GetShaderHashSpirv", &GFSDK_Aftermath_GetShaderHashSpirv) || + !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_CreateDecoder", + &GFSDK_Aftermath_GpuCrashDump_CreateDecoder) || + !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_DestroyDecoder", + &GFSDK_Aftermath_GpuCrashDump_DestroyDecoder) || + !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GenerateJSON", + &GFSDK_Aftermath_GpuCrashDump_GenerateJSON) || + !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GetJSON", + &GFSDK_Aftermath_GpuCrashDump_GetJSON)) { + LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath function pointers"); + return false; + } + + dump_dir = FileUtil::GetUserPath(FileUtil::UserPath::LogDir) + "gpucrash"; + + (void)FileUtil::DeleteDirRecursively(dump_dir); + if (!FileUtil::CreateDir(dump_dir)) { + LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory"); + return false; + } + + if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_EnableGpuCrashDumps( + GFSDK_Aftermath_Version_API, GFSDK_Aftermath_GpuCrashDumpWatchedApiFlags_Vulkan, + GFSDK_Aftermath_GpuCrashDumpFeatureFlags_Default, GpuCrashDumpCallback, + ShaderDebugInfoCallback, CrashDumpDescriptionCallback, this))) { + LOG_ERROR(Render_Vulkan, 
"GFSDK_Aftermath_EnableGpuCrashDumps failed"); + return false; + } + + LOG_INFO(Render_Vulkan, "Nsight Aftermath dump directory is \"{}\"", dump_dir); + + initialized = true; + return true; +} + +void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const { + if (!initialized) { + return; + } + + std::vector<u32> spirv_copy = spirv; + GFSDK_Aftermath_SpirvCode shader; + shader.pData = spirv_copy.data(); + shader.size = static_cast<u32>(spirv_copy.size() * 4); + + std::scoped_lock lock{mutex}; + + GFSDK_Aftermath_ShaderHash hash; + if (!GFSDK_Aftermath_SUCCEED( + GFSDK_Aftermath_GetShaderHashSpirv(GFSDK_Aftermath_Version_API, &shader, &hash))) { + LOG_ERROR(Render_Vulkan, "Failed to hash SPIR-V module"); + return; + } + + FileUtil::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb"); + if (!file.IsOpen()) { + LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash); + return; + } + if (file.WriteArray(spirv.data(), spirv.size()) != spirv.size()) { + LOG_ERROR(Render_Vulkan, "Failed to write SPIR-V module with hash={:016x}", hash.hash); + return; + } +} + +void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump, + u32 gpu_crash_dump_size) { + std::scoped_lock lock{mutex}; + + LOG_CRITICAL(Render_Vulkan, "called"); + + GFSDK_Aftermath_GpuCrashDump_Decoder decoder; + if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_CreateDecoder( + GFSDK_Aftermath_Version_API, gpu_crash_dump, gpu_crash_dump_size, &decoder))) { + LOG_ERROR(Render_Vulkan, "Failed to create decoder"); + return; + } + SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); }); + + u32 json_size = 0; + if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON( + decoder, GFSDK_Aftermath_GpuCrashDumpDecoderFlags_ALL_INFO, + GFSDK_Aftermath_GpuCrashDumpFormatterFlags_NONE, nullptr, nullptr, nullptr, nullptr, + this, &json_size))) { + LOG_ERROR(Render_Vulkan, "Failed to generate JSON"); + return; + } + std::vector<char> json(json_size); + if (!GFSDK_Aftermath_SUCCEED( + GFSDK_Aftermath_GpuCrashDump_GetJSON(decoder, json_size, json.data()))) { + LOG_ERROR(Render_Vulkan, "Failed to query JSON"); + return; + } + + const std::string base_name = [this] { + const int id = dump_id++; + if (id == 0) { + return fmt::format("{}/crash.nv-gpudmp", dump_dir); + } else { + return fmt::format("{}/crash_{}.nv-gpudmp", dump_dir, id); + } + }(); + + std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size); + if (FileUtil::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) { + LOG_ERROR(Render_Vulkan, "Failed to write dump file"); + return; + } + const std::string_view json_view(json.data(), json.size()); + if (FileUtil::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) { + LOG_ERROR(Render_Vulkan, "Failed to write JSON"); + return; + } +} + +void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_info, + u32 shader_debug_info_size) { + std::scoped_lock lock{mutex}; + + GFSDK_Aftermath_ShaderDebugInfoIdentifier identifier; + if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GetShaderDebugInfoIdentifier( + GFSDK_Aftermath_Version_API, shader_debug_info, shader_debug_info_size, &identifier))) { + LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_GetShaderDebugInfoIdentifier failed"); + return; + } + + const std::string path = + fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]); + FileUtil::IOFile 
file(path, "wb"); + if (!file.IsOpen()) { + LOG_ERROR(Render_Vulkan, "Failed to create file {}", path); + return; + } + if (file.WriteBytes(static_cast<const u8*>(shader_debug_info), shader_debug_info_size) != + shader_debug_info_size) { + LOG_ERROR(Render_Vulkan, "Failed to write file {}", path); + return; + } +} + +void NsightAftermathTracker::OnCrashDumpDescriptionCallback( + PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description) { + add_description(GFSDK_Aftermath_GpuCrashDumpDescriptionKey_ApplicationName, "yuzu"); +} + +void NsightAftermathTracker::GpuCrashDumpCallback(const void* gpu_crash_dump, + u32 gpu_crash_dump_size, void* user_data) { + static_cast<NsightAftermathTracker*>(user_data)->OnGpuCrashDumpCallback(gpu_crash_dump, + gpu_crash_dump_size); +} + +void NsightAftermathTracker::ShaderDebugInfoCallback(const void* shader_debug_info, + u32 shader_debug_info_size, void* user_data) { + static_cast<NsightAftermathTracker*>(user_data)->OnShaderDebugInfoCallback( + shader_debug_info, shader_debug_info_size); +} + +void NsightAftermathTracker::CrashDumpDescriptionCallback( + PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data) { + static_cast<NsightAftermathTracker*>(user_data)->OnCrashDumpDescriptionCallback( + add_description); +} + +} // namespace Vulkan + +#endif // HAS_NSIGHT_AFTERMATH diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h new file mode 100644 index 000000000..afe7ae99e --- /dev/null +++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h @@ -0,0 +1,87 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <mutex> +#include <string> +#include <vector> + +#define VK_NO_PROTOTYPES +#include <vulkan/vulkan.h> + +#ifdef HAS_NSIGHT_AFTERMATH +#include <GFSDK_Aftermath_Defines.h> +#include <GFSDK_Aftermath_GpuCrashDump.h> +#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h> +#endif + +#include "common/common_types.h" +#include "common/dynamic_library.h" + +namespace Vulkan { + +class NsightAftermathTracker { +public: + NsightAftermathTracker(); + ~NsightAftermathTracker(); + + NsightAftermathTracker(const NsightAftermathTracker&) = delete; + NsightAftermathTracker& operator=(const NsightAftermathTracker&) = delete; + + // Delete move semantics because Aftermath initialization uses a pointer to this. 
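+    // Moving the tracker would leave the user_data pointer registered with
+    // GFSDK_Aftermath_EnableGpuCrashDumps dangling. For illustration only:
+    //
+    //   NsightAftermathTracker a;
+    //   a.Initialize();                          // registers &a with the driver
+    //   NsightAftermathTracker b{std::move(a)};  // ill-formed: moves are deleted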
+ NsightAftermathTracker(NsightAftermathTracker&&) = delete; + NsightAftermathTracker& operator=(NsightAftermathTracker&&) = delete; + + bool Initialize(); + + void SaveShader(const std::vector<u32>& spirv) const; + +private: +#ifdef HAS_NSIGHT_AFTERMATH + static void GpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size, + void* user_data); + + static void ShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size, + void* user_data); + + static void CrashDumpDescriptionCallback( + PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data); + + void OnGpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size); + + void OnShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size); + + void OnCrashDumpDescriptionCallback( + PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description); + + mutable std::mutex mutex; + + std::string dump_dir; + int dump_id = 0; + + bool initialized = false; + + Common::DynamicLibrary dl; + PFN_GFSDK_Aftermath_DisableGpuCrashDumps GFSDK_Aftermath_DisableGpuCrashDumps; + PFN_GFSDK_Aftermath_EnableGpuCrashDumps GFSDK_Aftermath_EnableGpuCrashDumps; + PFN_GFSDK_Aftermath_GetShaderDebugInfoIdentifier GFSDK_Aftermath_GetShaderDebugInfoIdentifier; + PFN_GFSDK_Aftermath_GetShaderHashSpirv GFSDK_Aftermath_GetShaderHashSpirv; + PFN_GFSDK_Aftermath_GpuCrashDump_CreateDecoder GFSDK_Aftermath_GpuCrashDump_CreateDecoder; + PFN_GFSDK_Aftermath_GpuCrashDump_DestroyDecoder GFSDK_Aftermath_GpuCrashDump_DestroyDecoder; + PFN_GFSDK_Aftermath_GpuCrashDump_GenerateJSON GFSDK_Aftermath_GpuCrashDump_GenerateJSON; + PFN_GFSDK_Aftermath_GpuCrashDump_GetJSON GFSDK_Aftermath_GpuCrashDump_GetJSON; +#endif +}; + +#ifndef HAS_NSIGHT_AFTERMATH +inline NsightAftermathTracker::NsightAftermathTracker() = default; +inline NsightAftermathTracker::~NsightAftermathTracker() = default; +inline bool NsightAftermathTracker::Initialize() { + return false; +} +inline void NsightAftermathTracker::SaveShader(const std::vector<u32>&) const {} +#endif + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index dd590c38b..04532f8f8 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -42,7 +42,7 @@ #include <vulkan/vulkan_win32.h> #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) #include <X11/Xlib.h> #include <vulkan/vulkan_wayland.h> #include <vulkan/vulkan_xlib.h> @@ -119,7 +119,7 @@ vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatc extensions.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); break; #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) case Core::Frontend::WindowSystemType::X11: extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); break; @@ -345,7 +345,7 @@ bool RendererVulkan::CreateSurface() { } } #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) if (window_info.type == Core::Frontend::WindowSystemType::X11) { const VkXlibSurfaceCreateInfoKHR xlib_ci{ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, nullptr, 0, diff --git a/src/video_core/renderer_vulkan/shaders/quad_indexed.comp b/src/video_core/renderer_vulkan/shaders/quad_indexed.comp new file mode 100644 index 000000000..5a472ba9b --- /dev/null +++ b/src/video_core/renderer_vulkan/shaders/quad_indexed.comp @@ -0,0 +1,50 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under 
GPLv2 or any later version +// Refer to the license.txt file included. + +/* + * Build instructions: + * $ glslangValidator -V quad_indexed.comp -o output.spv + * $ spirv-opt -O --strip-debug output.spv -o optimized.spv + * $ xxd -i optimized.spv + * + * Then copy that bytecode to the C++ file + */ + +#version 460 core + +layout (local_size_x = 1024) in; + +layout (std430, set = 0, binding = 0) readonly buffer InputBuffer { + uint input_indexes[]; +}; + +layout (std430, set = 0, binding = 1) writeonly buffer OutputBuffer { + uint output_indexes[]; +}; + +layout (push_constant) uniform PushConstants { + uint base_vertex; + int index_shift; // 0: uint8, 1: uint16, 2: uint32 +}; + +void main() { + int primitive = int(gl_GlobalInvocationID.x); + if (primitive * 6 >= output_indexes.length()) { + return; + } + + int index_size = 8 << index_shift; + int flipped_shift = 2 - index_shift; + int mask = (1 << flipped_shift) - 1; + + const int quad_swizzle[6] = int[](0, 1, 2, 0, 2, 3); + for (uint vertex = 0; vertex < 6; ++vertex) { + int offset = primitive * 4 + quad_swizzle[vertex]; + int int_offset = offset >> flipped_shift; + int bit_offset = (offset & mask) * index_size; + uint packed_input = input_indexes[int_offset]; + uint index = bitfieldExtract(packed_input, bit_offset, index_size); + output_indexes[primitive * 6 + vertex] = index + base_vertex; + } +} diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index 878a78755..7b0268033 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -135,11 +135,11 @@ VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEnt return entry; } -VkPushConstantRange BuildQuadArrayPassPushConstantRange() { +VkPushConstantRange BuildComputePushConstantRange(std::size_t size) { VkPushConstantRange range; range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; range.offset = 0; - range.size = sizeof(u32); + range.size = static_cast<u32>(size); return range; } @@ -220,7 +220,130 @@ constexpr u8 uint8_pass[] = { 0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; -std::array<VkDescriptorSetLayoutBinding, 2> BuildUint8PassDescriptorSetBindings() { +// Quad indexed SPIR-V module. Generated from the "shaders/" directory. 
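+// The module expands each indexed quad into two triangles. A rough CPU
+// equivalent of one primitive (names here are illustrative; the real work is
+// done on the GPU by shaders/quad_indexed.comp):
+//
+//   constexpr u32 QUAD_SWIZZLE[6] = {0, 1, 2, 0, 2, 3};
+//   for (u32 v = 0; v < 6; ++v) {
+//       output[prim * 6 + v] = input[prim * 4 + QUAD_SWIZZLE[v]] + base_vertex;
+//   }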
+constexpr u8 QUAD_INDEXED_SPV[] = { + 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x7c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x06, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x47, 0x4c, 0x53, 0x4c, 0x2e, 0x73, 0x74, 0x64, 0x2e, 0x34, 0x35, 0x30, + 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x0f, 0x00, 0x06, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6d, 0x61, 0x69, 0x6e, + 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x47, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, + 0x47, 0x00, 0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x48, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, + 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x47, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x47, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x48, 0x00, 0x05, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x22, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x23, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x22, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x56, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x48, 0x00, 0x04, 0x00, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x57, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x59, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x72, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, + 0x19, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0b, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x04, 0x00, 0x0b, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x13, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x15, 0x00, 
0x00, 0x00, + 0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, + 0x3b, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x02, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x21, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x24, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x25, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x26, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x2b, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x3b, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x3f, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x04, 0x00, 0x41, 0x00, 0x00, 0x00, + 0x06, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x43, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x09, 0x00, 0x41, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, + 0x42, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, + 0x46, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x03, 0x00, + 0x56, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x03, 0x00, 0x57, 0x00, 0x00, 0x00, + 0x56, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x58, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x57, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x04, 0x00, 0x58, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x69, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x2b, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x06, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00, 0x00, + 0x70, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0xf8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x04, 0x00, 0x46, 0x00, 0x00, 0x00, + 0x47, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, + 0xf8, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0xf6, 0x00, 0x04, 0x00, 0x73, 0x00, 0x00, 0x00, + 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, 0x75, 0x00, 0x00, 0x00, + 0xf8, 0x00, 0x02, 0x00, 0x75, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x0f, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x3d, 0x00, 0x04, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x04, 0x00, + 0x06, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, + 0x06, 0x00, 
0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x05, 0x00, 0x09, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, + 0x19, 0x00, 0x00, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0xf7, 0x00, 0x03, 0x00, 0x1e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xfa, 0x00, 0x04, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, + 0x1e, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf9, 0x00, 0x02, 0x00, + 0x73, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, + 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, + 0x3d, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, + 0xc4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, + 0x28, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x00, + 0x2b, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0xc4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x31, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x00, 0x82, 0x00, 0x05, 0x00, + 0x06, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, + 0xf9, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, + 0xf5, 0x00, 0x07, 0x00, 0x09, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, + 0x1e, 0x00, 0x00, 0x00, 0x6f, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x05, 0x00, + 0x1b, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x00, 0x00, + 0xf6, 0x00, 0x04, 0x00, 0x37, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xfa, 0x00, 0x04, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, + 0xf8, 0x00, 0x02, 0x00, 0x36, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x03, 0x00, + 0x47, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x48, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x3d, 0x00, 0x04, 0x00, + 0x06, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, + 0x06, 0x00, 0x00, 0x00, 0x4a, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x49, 0x00, 0x00, 0x00, + 0xc3, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x4a, 0x00, 0x00, 0x00, + 0x2e, 0x00, 0x00, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x4a, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x84, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x54, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, + 0x4e, 0x00, 0x00, 0x00, 0x3d, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x5d, 0x00, 0x00, 0x00, + 0x5c, 0x00, 0x00, 0x00, 0xcb, 0x00, 0x06, 0x00, 0x09, 0x00, 0x00, 0x00, 0x62, 0x00, 0x00, 0x00, + 0x5d, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x04, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x67, 0x00, 
0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, + 0x41, 0x00, 0x05, 0x00, 0x69, 0x00, 0x00, 0x00, 0x6a, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, + 0x42, 0x00, 0x00, 0x00, 0x3d, 0x00, 0x04, 0x00, 0x09, 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, + 0x6a, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, 0x09, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, + 0x62, 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x41, 0x00, 0x06, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x6d, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, + 0x3e, 0x00, 0x03, 0x00, 0x6d, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, 0x80, 0x00, 0x05, 0x00, + 0x09, 0x00, 0x00, 0x00, 0x6f, 0x00, 0x00, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, + 0xf9, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00, + 0xf9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x76, 0x00, 0x00, 0x00, + 0xf9, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, + 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; + +std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() { std::array<VkDescriptorSetLayoutBinding, 2> bindings; bindings[0].binding = 0; bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; @@ -235,7 +358,7 @@ std::array<VkDescriptorSetLayoutBinding, 2> BuildUint8PassDescriptorSetBindings( return bindings; } -VkDescriptorUpdateTemplateEntryKHR BuildUint8PassDescriptorUpdateTemplateEntry() { +VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() { VkDescriptorUpdateTemplateEntryKHR entry; entry.dstBinding = 0; entry.dstArrayElement = 0; @@ -337,14 +460,14 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler, VKUpdateDescriptorQueue& update_descriptor_queue) : VKComputePass(device, descriptor_pool, BuildQuadArrayPassDescriptorSetLayoutBinding(), BuildQuadArrayPassDescriptorUpdateTemplateEntry(), - BuildQuadArrayPassPushConstantRange(), std::size(quad_array), quad_array), + BuildComputePushConstantRange(sizeof(u32)), std::size(quad_array), quad_array), scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool}, update_descriptor_queue{update_descriptor_queue} {} QuadArrayPass::~QuadArrayPass() = default; std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { - const u32 num_triangle_vertices = num_vertices * 6 / 4; + const u32 num_triangle_vertices = (num_vertices / 4) * 6; const std::size_t staging_size = num_triangle_vertices * sizeof(u32); auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); @@ -383,8 +506,8 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler, VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool, VKUpdateDescriptorQueue& update_descriptor_queue) - : VKComputePass(device, descriptor_pool, BuildUint8PassDescriptorSetBindings(), - BuildUint8PassDescriptorUpdateTemplateEntry(), {}, std::size(uint8_pass), + : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), + BuildInputOutputDescriptorUpdateTemplate(), {}, std::size(uint8_pass), uint8_pass), scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool}, update_descriptor_queue{update_descriptor_queue} {} @@ -425,4 +548,70 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff return {*buffer.handle, 0}; } 
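Context for the vertex-count change above and the QuadIndexedPass introduced below: both passes expand quads into triangles, emitting 6 output indices per 4 input vertices. Writing (num_vertices / 4) * 6 instead of num_vertices * 6 / 4 makes a trailing partial quad round down (6 input vertices now yield 6 indices, not 9), so the shader never expands past the last complete quad. The host-side sketch below mirrors what the compute shader does; it assumes the conventional 0-1-2 / 0-2-3 triangulation per quad and a little-endian host, and the function name and buffer layout are illustrative, not taken from the SPIR-V:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative only: host-side equivalent of the quad-to-triangle index
// expansion. index_shift matches the pass: 0 = u8, 1 = u16, 2 = u32 source
// indices. Output indices are always u32, with base_vertex baked in, which is
// why the caller can reset params.base_vertex to 0 afterwards.
void ExpandQuadIndices(const unsigned char* src, std::uint32_t num_vertices,
                       std::uint32_t base_vertex, std::uint32_t index_shift,
                       std::uint32_t* dst) {
    static constexpr std::array<std::uint32_t, 6> MAP{0, 1, 2, 0, 2, 3};
    for (std::uint32_t quad = 0; quad < num_vertices / 4; ++quad) {
        for (std::uint32_t i = 0; i < 6; ++i) {
            std::uint32_t value = 0;
            // Little-endian read of one source index (1 << index_shift bytes).
            std::memcpy(&value, src + ((quad * 4 + MAP[i]) << index_shift),
                        std::size_t{1} << index_shift);
            dst[quad * 6 + i] = value + base_vertex;
        }
    }
}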
+QuadIndexedPass::QuadIndexedPass(const VKDevice& device, VKScheduler& scheduler, + VKDescriptorPool& descriptor_pool, + VKStagingBufferPool& staging_buffer_pool, + VKUpdateDescriptorQueue& update_descriptor_queue) + : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), + BuildInputOutputDescriptorUpdateTemplate(), + BuildComputePushConstantRange(sizeof(u32) * 2), std::size(QUAD_INDEXED_SPV), + QUAD_INDEXED_SPV), + scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool}, + update_descriptor_queue{update_descriptor_queue} {} + +QuadIndexedPass::~QuadIndexedPass() = default; + +std::pair<VkBuffer, u64> QuadIndexedPass::Assemble( + Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, u32 num_vertices, u32 base_vertex, + VkBuffer src_buffer, u64 src_offset) { + const u32 index_shift = [index_format] { + switch (index_format) { + case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedByte: + return 0; + case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedShort: + return 1; + case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedInt: + return 2; + } + UNREACHABLE(); + return 2; + }(); + const u32 input_size = num_vertices << index_shift; + const u32 num_tri_vertices = (num_vertices / 4) * 6; + + const std::size_t staging_size = num_tri_vertices * sizeof(u32); + auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); + + update_descriptor_queue.Acquire(); + update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); + update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); + const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); + + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, + num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { + static constexpr u32 dispatch_size = 1024; + const std::array push_constants = {base_vertex, index_shift}; + cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); + cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); + cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants), + &push_constants); + cmdbuf.Dispatch(Common::AlignUp(num_tri_vertices, dispatch_size) / dispatch_size, 1, 1); + + VkBufferMemoryBarrier barrier; + barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; + barrier.pNext = nullptr; + barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT; + barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.buffer = buffer; + barrier.offset = 0; + barrier.size = static_cast<VkDeviceSize>(num_tri_vertices * sizeof(u32)); + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); + }); + return {*buffer.handle, 0}; +} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h index ec80c8683..26bf834de 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.h +++ b/src/video_core/renderer_vulkan/vk_compute_pass.h @@ -8,6 +8,7 @@ #include <utility> #include <vector> #include "common/common_types.h" +#include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/wrapper.h" @@ -73,4 +74,22 @@ private: 
VKUpdateDescriptorQueue& update_descriptor_queue; }; +class QuadIndexedPass final : public VKComputePass { +public: + explicit QuadIndexedPass(const VKDevice& device, VKScheduler& scheduler, + VKDescriptorPool& descriptor_pool, + VKStagingBufferPool& staging_buffer_pool, + VKUpdateDescriptorQueue& update_descriptor_queue); + ~QuadIndexedPass(); + + std::pair<VkBuffer, u64> Assemble(Tegra::Engines::Maxwell3D::Regs::IndexFormat index_format, + u32 num_vertices, u32 base_vertex, VkBuffer src_buffer, + u64 src_offset); + +private: + VKScheduler& scheduler; + VKStagingBufferPool& staging_buffer_pool; + VKUpdateDescriptorQueue& update_descriptor_queue; +}; + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp index 23beafa4f..52566bb79 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp @@ -105,6 +105,8 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat } vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const { + device.SaveShader(code); + VkShaderModuleCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; ci.pNext = nullptr; diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 52d29e49d..e90c76492 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -9,6 +9,7 @@ #include <string_view> #include <thread> #include <unordered_set> +#include <utility> #include <vector> #include "common/assert.h" @@ -167,6 +168,7 @@ bool VKDevice::Create() { VkPhysicalDeviceFeatures2 features2; features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; features2.pNext = nullptr; + const void* first_next = &features2; void** next = &features2.pNext; auto& features = features2.features; @@ -296,7 +298,19 @@ bool VKDevice::Create() { LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); } - logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld); + VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv; + if (nv_device_diagnostics_config) { + nsight_aftermath_tracker.Initialize(); + + diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV; + diagnostics_nv.pNext = &features2; + diagnostics_nv.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV | + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV | + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV; + first_next = &diagnostics_nv; + } + + logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld); if (!logical) { LOG_ERROR(Render_Vulkan, "Failed to create logical device"); return false; @@ -344,17 +358,12 @@ VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFla void VKDevice::ReportLoss() const { LOG_CRITICAL(Render_Vulkan, "Device loss occurred!"); - // Wait some time to let the log flush - std::this_thread::sleep_for(std::chrono::seconds{1}); - - if (!nv_device_diagnostic_checkpoints) { - return; - } + // Wait for the log to flush and for Nsight Aftermath to dump the results + std::this_thread::sleep_for(std::chrono::seconds{3}); +} - [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld); - // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be - // executed.
It can be done on a debugger by evaluating the expression: - // *(VKGraphicsPipeline*)data[0] +void VKDevice::SaveShader(const std::vector<u32>& spirv) const { + nsight_aftermath_tracker.SaveShader(spirv); } bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const { @@ -527,8 +536,8 @@ std::vector<const char*> VKDevice::LoadExtensions() { Test(extension, has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME, false); if (Settings::values.renderer_debug) { - Test(extension, nv_device_diagnostic_checkpoints, - VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true); + Test(extension, nv_device_diagnostics_config, + VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME, true); } } diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index 60d64572a..a4d841e26 100644 --- a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -10,6 +10,7 @@ #include <vector> #include "common/common_types.h" +#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h" #include "video_core/renderer_vulkan/wrapper.h" namespace Vulkan { @@ -43,6 +44,9 @@ public: /// Reports a device loss. void ReportLoss() const; + /// Reports a shader to Nsight Aftermath. + void SaveShader(const std::vector<u32>& spirv) const; + /// Returns the dispatch loader with direct function pointers of the device. const vk::DeviceDispatch& GetDispatchLoader() const { return dld; @@ -173,11 +177,6 @@ public: return ext_transform_feedback; } - /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints. - bool IsNvDeviceDiagnosticCheckpoints() const { - return nv_device_diagnostic_checkpoints; - } - /// Returns the vendor name reported from Vulkan. std::string_view GetVendorName() const { return vendor_name; @@ -233,7 +232,7 @@ private: bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback. - bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints. + bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config. // Telemetry parameters std::string vendor_name; ///< Device's driver name. @@ -241,6 +240,9 @@ private: /// Format properties dictionary. std::unordered_map<VkFormat, VkFormatProperties> format_properties; + + /// Nsight Aftermath GPU crash tracker + NsightAftermathTracker nsight_aftermath_tracker; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp new file mode 100644 index 000000000..a02be5487 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -0,0 +1,101 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
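The new fence manager below builds guest-visible fences on VkEvent instead of VkFence: completion is signalled from the GPU timeline with vkCmdSetEvent and observed on the host with vkGetEventStatus, so the emulator can poll for completion without blocking inside the driver. A minimal sketch of that primitive against the raw Vulkan API (not the emulator's wrapper; error handling elided; assumes <vulkan/vulkan.h> and an already-created device and command buffer):

#include <vulkan/vulkan.h>

// Illustrative only: VkEvent as a GPU -> CPU completion flag.
VkEvent MakeEvent(VkDevice device) {
    VkEventCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    VkEvent event = VK_NULL_HANDLE;
    vkCreateEvent(device, &ci, nullptr, &event); // error handling elided
    return event;
}

// Recorded on the GPU timeline; the event becomes set once all prior commands
// reach the given stage.
void SignalFromGpu(VkCommandBuffer cmdbuf, VkEvent event) {
    vkCmdSetEvent(cmdbuf, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
}

// Host-side poll; returns true once the GPU has executed the vkCmdSetEvent.
bool IsDone(VkDevice device, VkEvent event) {
    return vkGetEventStatus(device, event) == VK_EVENT_SET;
}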
+ +#include <memory> +#include <thread> + +#include "video_core/renderer_vulkan/vk_buffer_cache.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_fence_manager.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_texture_cache.h" +#include "video_core/renderer_vulkan/wrapper.h" + +namespace Vulkan { + +InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload, bool is_stubbed) + : VideoCommon::FenceBase(payload, is_stubbed), device{device}, scheduler{scheduler} {} + +InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address, + u32 payload, bool is_stubbed) + : VideoCommon::FenceBase(address, payload, is_stubbed), device{device}, scheduler{scheduler} {} + +InnerFence::~InnerFence() = default; + +void InnerFence::Queue() { + if (is_stubbed) { + return; + } + ASSERT(!event); + + event = device.GetLogical().CreateEvent(); + ticks = scheduler.Ticks(); + + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) { + cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); + }); +} + +bool InnerFence::IsSignaled() const { + if (is_stubbed) { + return true; + } + ASSERT(event); + return IsEventSignalled(); +} + +void InnerFence::Wait() { + if (is_stubbed) { + return; + } + ASSERT(event); + + if (ticks >= scheduler.Ticks()) { + scheduler.Flush(); + } + while (!IsEventSignalled()) { + std::this_thread::yield(); + } +} + +bool InnerFence::IsEventSignalled() const { + switch (const VkResult result = event.GetStatus()) { + case VK_EVENT_SET: + return true; + case VK_EVENT_RESET: + return false; + default: + throw vk::Exception(result); + } +} + +VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + const VKDevice& device, VKScheduler& scheduler, + VKTextureCache& texture_cache, VKBufferCache& buffer_cache, + VKQueryCache& query_cache) + : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache), + device{device}, scheduler{scheduler} {} + +Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) { + return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed); +} + +Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { + return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed); +} + +void VKFenceManager::QueueFence(Fence& fence) { + fence->Queue(); +} + +bool VKFenceManager::IsFenceSignaled(Fence& fence) const { + return fence->IsSignaled(); +} + +void VKFenceManager::WaitFence(Fence& fence) { + fence->Wait(); +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h new file mode 100644 index 000000000..04d07fe6a --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -0,0 +1,74 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <memory> + +#include "video_core/fence_manager.h" +#include "video_core/renderer_vulkan/wrapper.h" + +namespace Core { +class System; +} + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Vulkan { + +class VKBufferCache; +class VKDevice; +class VKQueryCache; +class VKScheduler; +class VKTextureCache; + +class InnerFence : public VideoCommon::FenceBase { +public: + explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload, + bool is_stubbed); + explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address, + u32 payload, bool is_stubbed); + ~InnerFence(); + + void Queue(); + + bool IsSignaled() const; + + void Wait(); + +private: + bool IsEventSignalled() const; + + const VKDevice& device; + VKScheduler& scheduler; + vk::Event event; + u64 ticks = 0; +}; +using Fence = std::shared_ptr<InnerFence>; + +using GenericFenceManager = + VideoCommon::FenceManager<Fence, VKTextureCache, VKBufferCache, VKQueryCache>; + +class VKFenceManager final : public GenericFenceManager { +public: + explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + const VKDevice& device, VKScheduler& scheduler, + VKTextureCache& texture_cache, VKBufferCache& buffer_cache, + VKQueryCache& query_cache); + +protected: + Fence CreateFence(u32 value, bool is_stubbed) override; + Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override; + void QueueFence(Fence& fence) override; + bool IsFenceSignaled(Fence& fence) const override; + void WaitFence(Fence& fence) override; + +private: + const VKDevice& device; + VKScheduler& scheduler; +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index b540b838d..8332b42aa 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -26,12 +26,13 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache); namespace { -VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) { +template <class StencilFace> +VkStencilOpState GetStencilFaceState(const StencilFace& face) { VkStencilOpState state; - state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail); - state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass); - state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail); - state.compareOp = MaxwellToVK::ComparisonOp(face.test_func); + state.failOp = MaxwellToVK::StencilOp(face.ActionStencilFail()); + state.passOp = MaxwellToVK::StencilOp(face.ActionDepthPass()); + state.depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail()); + state.compareOp = MaxwellToVK::ComparisonOp(face.TestFunc()); state.compareMask = 0; state.writeMask = 0; state.reference = 0; @@ -147,6 +148,8 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules( continue; } + device.SaveShader(stage->code); + ci.codeSize = stage->code.size() * sizeof(u32); ci.pCode = stage->code.data(); modules.push_back(device.GetLogical().CreateShaderModule(ci)); @@ -157,43 +160,47 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules( vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params, const SPIRVProgram& program) const { const auto& vi = fixed_state.vertex_input; - const auto& ia = fixed_state.input_assembly; const auto& ds = fixed_state.depth_stencil; const auto& cd = fixed_state.color_blending; - const auto& ts = 
fixed_state.tessellation; const auto& rs = fixed_state.rasterizer; std::vector<VkVertexInputBindingDescription> vertex_bindings; std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors; - for (std::size_t i = 0; i < vi.num_bindings; ++i) { - const auto& binding = vi.bindings[i]; - const bool instanced = binding.divisor != 0; + for (std::size_t index = 0; index < std::size(vi.bindings); ++index) { + const auto& binding = vi.bindings[index]; + if (!binding.enabled) { + continue; + } + const bool instanced = vi.binding_divisors[index] != 0; const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX; auto& vertex_binding = vertex_bindings.emplace_back(); - vertex_binding.binding = binding.index; + vertex_binding.binding = static_cast<u32>(index); vertex_binding.stride = binding.stride; vertex_binding.inputRate = rate; if (instanced) { auto& binding_divisor = vertex_binding_divisors.emplace_back(); - binding_divisor.binding = binding.index; - binding_divisor.divisor = binding.divisor; + binding_divisor.binding = static_cast<u32>(index); + binding_divisor.divisor = vi.binding_divisors[index]; } } std::vector<VkVertexInputAttributeDescription> vertex_attributes; const auto& input_attributes = program[0]->entries.attributes; - for (std::size_t i = 0; i < vi.num_attributes; ++i) { - const auto& attribute = vi.attributes[i]; - if (input_attributes.find(attribute.index) == input_attributes.end()) { + for (std::size_t index = 0; index < std::size(vi.attributes); ++index) { + const auto& attribute = vi.attributes[index]; + if (!attribute.enabled) { + continue; + } + if (input_attributes.find(static_cast<u32>(index)) == input_attributes.end()) { // Skip attributes not used by the vertex shaders. continue; } auto& vertex_attribute = vertex_attributes.emplace_back(); - vertex_attribute.location = attribute.index; + vertex_attribute.location = static_cast<u32>(index); vertex_attribute.binding = attribute.buffer; - vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size); + vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size()); vertex_attribute.offset = attribute.offset; } @@ -219,15 +226,15 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_ci.pNext = nullptr; input_assembly_ci.flags = 0; - input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology); + input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, rs.Topology()); input_assembly_ci.primitiveRestartEnable = - ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology); + rs.primitive_restart_enable != 0 && SupportsPrimitiveRestart(input_assembly_ci.topology); VkPipelineTessellationStateCreateInfo tessellation_ci; tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tessellation_ci.pNext = nullptr; tessellation_ci.flags = 0; - tessellation_ci.patchControlPoints = ts.patch_control_points; + tessellation_ci.patchControlPoints = rs.patch_control_points_minus_one.Value() + 1; VkPipelineViewportStateCreateInfo viewport_ci; viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; @@ -246,8 +253,8 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa rasterization_ci.rasterizerDiscardEnable = VK_FALSE; rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL; 
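The accessor calls above and below (face.ActionStencilFail(), rs.Topology(), rs.patch_control_points_minus_one.Value() + 1) come from FixedPipelineState packing its members into bitfields, which keeps the pipeline cache key small and cheap to hash and compare. A self-contained sketch of the idiom with invented bit positions and widths (the real layout lives in fixed_pipeline_state.h, which this diff does not include; yuzu uses its BitField helper rather than the plain bit operations shown here):

#include <cstdint>

// Illustrative only: the packing pattern behind the new accessors.
struct PackedRasterizer {
    std::uint32_t raw = 0;

    // Bits [0, 4): primitive topology enum value.
    std::uint32_t Topology() const {
        return raw & 0xF;
    }
    // Bits [4, 10): patch control points, stored minus one so 1..64 fits in
    // six bits (hence the ".Value() + 1" at the use site).
    std::uint32_t PatchControlPoints() const {
        return ((raw >> 4) & 0x3F) + 1;
    }
};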
rasterization_ci.cullMode = - rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE; - rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face); + rs.cull_enable ? MaxwellToVK::CullFace(rs.CullFace()) : VK_CULL_MODE_NONE; + rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.FrontFace()); rasterization_ci.depthBiasEnable = rs.depth_bias_enable; rasterization_ci.depthBiasConstantFactor = 0.0f; rasterization_ci.depthBiasClamp = 0.0f; @@ -271,40 +278,38 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa depth_stencil_ci.flags = 0; depth_stencil_ci.depthTestEnable = ds.depth_test_enable; depth_stencil_ci.depthWriteEnable = ds.depth_write_enable; - depth_stencil_ci.depthCompareOp = ds.depth_test_enable - ? MaxwellToVK::ComparisonOp(ds.depth_test_function) - : VK_COMPARE_OP_ALWAYS; + depth_stencil_ci.depthCompareOp = + ds.depth_test_enable ? MaxwellToVK::ComparisonOp(ds.DepthTestFunc()) : VK_COMPARE_OP_ALWAYS; depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable; depth_stencil_ci.stencilTestEnable = ds.stencil_enable; - depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil); - depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil); + depth_stencil_ci.front = GetStencilFaceState(ds.front); + depth_stencil_ci.back = GetStencilFaceState(ds.back); depth_stencil_ci.minDepthBounds = 0.0f; depth_stencil_ci.maxDepthBounds = 0.0f; std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments; - const std::size_t num_attachments = - std::min(cd.attachments_count, renderpass_params.color_attachments.size()); - for (std::size_t i = 0; i < num_attachments; ++i) { - static constexpr std::array component_table = { + const std::size_t num_attachments = renderpass_params.color_attachments.size(); + for (std::size_t index = 0; index < num_attachments; ++index) { + static constexpr std::array COMPONENT_TABLE = { VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT, VK_COLOR_COMPONENT_A_BIT}; - const auto& blend = cd.attachments[i]; + const auto& blend = cd.attachments[index]; VkColorComponentFlags color_components = 0; - for (std::size_t j = 0; j < component_table.size(); ++j) { - if (blend.components[j]) { - color_components |= component_table[j]; + for (std::size_t i = 0; i < COMPONENT_TABLE.size(); ++i) { + if (blend.Mask()[i]) { + color_components |= COMPONENT_TABLE[i]; } } - VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i]; - attachment.blendEnable = blend.enable; - attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func); - attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func); - attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation); - attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func); - attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func); - attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation); + VkPipelineColorBlendAttachmentState& attachment = cb_attachments[index]; + attachment.blendEnable = blend.enable != 0; + attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor()); + attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor()); + attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB()); + attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor()); + attachment.dstAlphaBlendFactor = 
MaxwellToVK::BlendFactor(blend.DestAlphaFactor()); + attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha()); attachment.colorWriteMask = color_components; } diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 90e3a8edd..91b1b16a5 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -207,7 +207,7 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() { const GPUVAddr program_addr{GetShaderAddress(system, program)}; const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr); ASSERT(cpu_addr); - auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr; + auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader; if (!shader) { const auto host_ptr{memory_manager.GetPointer(program_addr)}; @@ -218,7 +218,11 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() { shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr, std::move(code), stage_offset); - Register(shader); + if (cpu_addr) { + Register(shader); + } else { + null_shader = shader; + } } shaders[index] = std::move(shader); } @@ -261,7 +265,7 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr); ASSERT(cpu_addr); - auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr; + auto shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel; if (!shader) { // No shader found - create a new one const auto host_ptr = memory_manager.GetPointer(program_addr); @@ -271,7 +275,11 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute, program_addr, *cpu_addr, std::move(code), kernel_main_offset); - Register(shader); + if (cpu_addr) { + Register(shader); + } else { + null_kernel = shader; + } } Specialization specialization; @@ -329,12 +337,14 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) { const auto& gpu = system.GPU().Maxwell3D(); Specialization specialization; - if (fixed_state.input_assembly.topology == Maxwell::PrimitiveTopology::Points) { - ASSERT(fixed_state.input_assembly.point_size != 0.0f); - specialization.point_size = fixed_state.input_assembly.point_size; + if (fixed_state.rasterizer.Topology() == Maxwell::PrimitiveTopology::Points) { + float point_size; + std::memcpy(&point_size, &fixed_state.rasterizer.point_size, sizeof(float)); + specialization.point_size = point_size; + ASSERT(point_size != 0.0f); } for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) { - specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].type; + specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].Type(); } specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 7ccdb7083..602a0a340 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -182,6 +182,9 @@ private: VKUpdateDescriptorQueue& update_descriptor_queue; VKRenderPassCache& renderpass_cache; + Shader null_shader{}; + Shader null_kernel{}; + std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; GraphicsPipelineCacheKey last_graphics_key; diff --git 
a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 0966c7ff7..813f7c162 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -113,8 +113,19 @@ u64 HostCounter::BlockingQuery() const { if (ticks >= cache.Scheduler().Ticks()) { cache.Scheduler().Flush(); } - return cache.Device().GetLogical().GetQueryResult<u64>( - query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); + u64 data; + const VkResult result = cache.Device().GetLogical().GetQueryResults( + query.first, query.second, 1, sizeof(data), &data, sizeof(data), + VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); + switch (result) { + case VK_SUCCESS: + return data; + case VK_ERROR_DEVICE_LOST: + cache.Device().ReportLoss(); + [[fallthrough]]; + default: + throw vk::Exception(result); + } } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 4ca0febb8..8a1f57891 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -17,6 +17,7 @@ #include "common/microprofile.h" #include "core/core.h" #include "core/memory.h" +#include "core/settings.h" #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/fixed_pipeline_state.h" @@ -292,13 +293,16 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind staging_pool(device, memory_manager, scheduler), descriptor_pool(device), update_descriptor_queue(device, scheduler), renderpass_cache(device), quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), + quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), texture_cache(system, *this, device, resource_manager, memory_manager, scheduler, staging_pool), pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue, renderpass_cache), buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool), - sampler_cache(device), query_cache(system, *this, device, scheduler) { + sampler_cache(device), + fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache), + query_cache(system, *this, device, scheduler) { scheduler.SetQueryCache(query_cache); } @@ -346,11 +350,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { buffer_bindings.Bind(scheduler); - if (device.IsNvDeviceDiagnosticCheckpoints()) { - scheduler.Record( - [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); }); - } - BeginTransformFeedback(); const auto pipeline_layout = pipeline.GetLayout(); @@ -364,6 +363,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { }); EndTransformFeedback(); + + system.GPU().TickWork(); } void RasterizerVulkan::Clear() { @@ -477,11 +478,6 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) { TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT); - if (device.IsNvDeviceDiagnosticCheckpoints()) { - scheduler.Record( - [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); }); - } - scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y, grid_z = 
launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(), layout = pipeline.GetLayout(), @@ -513,6 +509,13 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) { query_cache.FlushRegion(addr, size); } +bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size) { + if (!Settings::IsGPULevelHigh()) { + return buffer_cache.MustFlushRegion(addr, size); + } + return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size); +} + void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) { if (addr == 0 || size == 0) { return; @@ -523,6 +526,47 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) { query_cache.InvalidateRegion(addr, size); } +void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) { + if (addr == 0 || size == 0) { + return; + } + texture_cache.OnCPUWrite(addr, size); + pipeline_cache.InvalidateRegion(addr, size); + buffer_cache.OnCPUWrite(addr, size); + query_cache.InvalidateRegion(addr, size); +} + +void RasterizerVulkan::SyncGuestHost() { + texture_cache.SyncGuestHost(); + buffer_cache.SyncGuestHost(); +} + +void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + gpu.MemoryManager().Write<u32>(addr, value); + return; + } + fence_manager.SignalSemaphore(addr, value); +} + +void RasterizerVulkan::SignalSyncPoint(u32 value) { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + gpu.IncrementSyncPoint(value); + return; + } + fence_manager.SignalSyncPoint(value); +} + +void RasterizerVulkan::ReleaseFences() { + auto& gpu{system.GPU()}; + if (!gpu.IsAsync()) { + return; + } + fence_manager.WaitPendingFences(); +} + void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) { FlushRegion(addr, size); InvalidateRegion(addr, size); @@ -806,25 +850,29 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex BufferBindings& buffer_bindings) { const auto& regs = system.GPU().Maxwell3D().regs; - for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexAttributes); ++index) { + for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) { const auto& attrib = regs.vertex_attrib_format[index]; if (!attrib.IsValid()) { + vertex_input.SetAttribute(index, false, 0, 0, {}, {}); continue; } - const auto& buffer = regs.vertex_array[attrib.buffer]; + [[maybe_unused]] const auto& buffer = regs.vertex_array[attrib.buffer]; ASSERT(buffer.IsEnabled()); - vertex_input.attributes[vertex_input.num_attributes++] = - FixedPipelineState::VertexAttribute(index, attrib.buffer, attrib.type, attrib.size, - attrib.offset); + vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(), + attrib.size.Value()); } - for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexArrays); ++index) { + for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) { const auto& vertex_array = regs.vertex_array[index]; if (!vertex_array.IsEnabled()) { + vertex_input.SetBinding(index, false, 0, 0); continue; } + vertex_input.SetBinding( + index, true, vertex_array.stride, + regs.instanced_arrays.IsInstancingEnabled(index) ? 
vertex_array.divisor : 0); const GPUVAddr start{vertex_array.StartAddress()}; const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()}; @@ -832,10 +880,6 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex ASSERT(end > start); const std::size_t size{end - start + 1}; const auto [buffer, offset] = buffer_cache.UploadMemory(start, size); - - vertex_input.bindings[vertex_input.num_bindings++] = FixedPipelineState::VertexBinding( - index, vertex_array.stride, - regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0); buffer_bindings.AddVertexBinding(buffer, offset); } } @@ -844,18 +888,26 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar bool is_indexed) { const auto& regs = system.GPU().Maxwell3D().regs; switch (regs.draw.topology) { - case Maxwell::PrimitiveTopology::Quads: - if (params.is_indexed) { - UNIMPLEMENTED(); - } else { + case Maxwell::PrimitiveTopology::Quads: { + if (!params.is_indexed) { const auto [buffer, offset] = quad_array_pass.Assemble(params.num_vertices, params.base_vertex); buffer_bindings.SetIndexBinding(buffer, offset, VK_INDEX_TYPE_UINT32); params.base_vertex = 0; params.num_vertices = params.num_vertices * 6 / 4; params.is_indexed = true; + break; } + const GPUVAddr gpu_addr = regs.index_array.IndexStart(); + auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, CalculateIndexBufferSize()); + std::tie(buffer, offset) = quad_indexed_pass.Assemble( + regs.index_array.format, params.num_vertices, params.base_vertex, buffer, offset); + + buffer_bindings.SetIndexBinding(buffer, offset, VK_INDEX_TYPE_UINT32); + params.num_vertices = (params.num_vertices / 4) * 6; + params.base_vertex = 0; break; + } default: { if (!is_indexed) { break; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 46037860a..2fa46b0cc 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -21,6 +21,7 @@ #include "video_core/renderer_vulkan/vk_buffer_cache.h" #include "video_core/renderer_vulkan/vk_compute_pass.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" +#include "video_core/renderer_vulkan/vk_fence_manager.h" #include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h" #include "video_core/renderer_vulkan/vk_query_cache.h" @@ -118,7 +119,13 @@ public: void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; void FlushAll() override; void FlushRegion(VAddr addr, u64 size) override; + bool MustFlushRegion(VAddr addr, u64 size) override; void InvalidateRegion(VAddr addr, u64 size) override; + void OnCPUWrite(VAddr addr, u64 size) override; + void SyncGuestHost() override; + void SignalSemaphore(GPUVAddr addr, u32 value) override; + void SignalSyncPoint(u32 value) override; + void ReleaseFences() override; void FlushAndInvalidateRegion(VAddr addr, u64 size) override; void FlushCommands() override; void TickFrame() override; @@ -254,12 +261,14 @@ private: VKUpdateDescriptorQueue update_descriptor_queue; VKRenderPassCache renderpass_cache; QuadArrayPass quad_array_pass; + QuadIndexedPass quad_indexed_pass; Uint8Pass uint8_pass; VKTextureCache texture_cache; VKPipelineCache pipeline_cache; VKBufferCache buffer_cache; VKSamplerCache sampler_cache; + VKFenceManager fence_manager; VKQueryCache query_cache; std::array<View, Maxwell::NumRenderTargets> color_attachments; diff 
--git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp index 900f551b3..ae7ba3eb5 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.cpp +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -166,7 +166,15 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) { submit_info.pCommandBuffers = current_cmdbuf.address(); submit_info.signalSemaphoreCount = semaphore ? 1 : 0; submit_info.pSignalSemaphores = &semaphore; - device.GetGraphicsQueue().Submit(submit_info, *current_fence); + switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) { + case VK_SUCCESS: + break; + case VK_ERROR_DEVICE_LOST: + device.ReportLoss(); + [[fallthrough]]; + default: + vk::Check(result); + } } void VKScheduler::AllocateNewContext() { diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp index 38a93a01a..868447af2 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. #include <algorithm> +#include <limits> #include <optional> #include <tuple> #include <vector> @@ -22,22 +23,38 @@ namespace { constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000; constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000; -constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024; +constexpr u64 PREFERRED_STREAM_BUFFER_SIZE = 256 * 1024 * 1024; -std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter, - VkMemoryPropertyFlags wanted) { - const auto properties = device.GetPhysical().GetMemoryProperties(); - for (u32 i = 0; i < properties.memoryTypeCount; i++) { - if (!(filter & (1 << i))) { - continue; - } - if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) { +/// Find a memory type with the passed requirements +std::optional<u32> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties, + VkMemoryPropertyFlags wanted, + u32 filter = std::numeric_limits<u32>::max()) { + for (u32 i = 0; i < properties.memoryTypeCount; ++i) { + const auto flags = properties.memoryTypes[i].propertyFlags; + if ((flags & wanted) == wanted && (filter & (1U << i)) != 0) { return i; } } return std::nullopt; } +/// Get the preferred host visible memory type. +u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties, + u32 filter = std::numeric_limits<u32>::max()) { + // Prefer device local host visible allocations. Both AMD and Nvidia now provide one. + // Otherwise search for a host visible allocation. 
+ static constexpr auto HOST_MEMORY = + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + static constexpr auto DYNAMIC_MEMORY = HOST_MEMORY | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + + std::optional preferred_type = FindMemoryType(properties, DYNAMIC_MEMORY); + if (!preferred_type) { + preferred_type = FindMemoryType(properties, HOST_MEMORY); + ASSERT_MSG(preferred_type, "No host visible and coherent memory type found"); + } + return preferred_type.value_or(0); +} + } // Anonymous namespace VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, @@ -51,7 +68,7 @@ VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, VKStreamBuffer::~VKStreamBuffer() = default; std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) { - ASSERT(size <= STREAM_BUFFER_SIZE); + ASSERT(size <= stream_buffer_size); mapped_size = size; if (alignment > 0) { @@ -61,7 +78,7 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) { WaitPendingOperations(offset); bool invalidated = false; - if (offset + size > STREAM_BUFFER_SIZE) { + if (offset + size > stream_buffer_size) { // The buffer would overflow, save the amount of used watches and reset the state. invalidation_mark = current_watch_cursor; current_watch_cursor = 0; @@ -98,40 +115,37 @@ void VKStreamBuffer::Unmap(u64 size) { } void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) { + const auto memory_properties = device.GetPhysical().GetMemoryProperties(); + const u32 preferred_type = GetMemoryType(memory_properties); + const u32 preferred_heap = memory_properties.memoryTypes[preferred_type].heapIndex; + + // Subtract some bytes from the preferred heap size to avoid running out of memory. + const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size; + const VkDeviceSize allocable_size = heap_size - 4 * 1024 * 1024; + VkBufferCreateInfo buffer_ci; buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_ci.pNext = nullptr; buffer_ci.flags = 0; - buffer_ci.size = STREAM_BUFFER_SIZE; + buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size); buffer_ci.usage = usage; buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buffer_ci.queueFamilyIndexCount = 0; buffer_ci.pQueueFamilyIndices = nullptr; - const auto& dev = device.GetLogical(); - buffer = dev.CreateBuffer(buffer_ci); - - const auto& dld = device.GetDispatchLoader(); - const auto requirements = dev.GetBufferMemoryRequirements(*buffer); - // Prefer device local host visible allocations (this should hit AMD's pinned memory). - auto type = - FindMemoryType(device, requirements.memoryTypeBits, - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | - VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); - if (!type) { - // Otherwise search for a host visible allocation.
- type = FindMemoryType(device, requirements.memoryTypeBits, - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | - VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); - ASSERT_MSG(type, "No host visible and coherent memory type found"); - } + buffer = device.GetLogical().CreateBuffer(buffer_ci); + + const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer); + const u32 required_flags = requirements.memoryTypeBits; + stream_buffer_size = static_cast<u64>(requirements.size); + VkMemoryAllocateInfo memory_ai; memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_ai.pNext = nullptr; memory_ai.allocationSize = requirements.size; - memory_ai.memoryTypeIndex = *type; + memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags); - memory = dev.AllocateMemory(memory_ai); + memory = device.GetLogical().AllocateMemory(memory_ai); buffer.BindMemory(*memory, 0); } diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h index 58ce8b973..dfddf7ad6 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.h +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h @@ -56,8 +56,9 @@ private: const VKDevice& device; ///< Vulkan device manager. VKScheduler& scheduler; ///< Command scheduler. - vk::Buffer buffer; ///< Mapped buffer. - vk::DeviceMemory memory; ///< Memory allocation. + vk::Buffer buffer; ///< Mapped buffer. + vk::DeviceMemory memory; ///< Memory allocation. + u64 stream_buffer_size{}; ///< Stream buffer size. u64 offset{}; ///< Buffer iterator. u64 mapped_size{}; ///< Size reserved for the current copy. diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp index 9b94dfff1..539f3c974 100644 --- a/src/video_core/renderer_vulkan/wrapper.cpp +++ b/src/video_core/renderer_vulkan/wrapper.cpp @@ -61,9 +61,9 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkCmdPipelineBarrier); X(vkCmdPushConstants); X(vkCmdSetBlendConstants); - X(vkCmdSetCheckpointNV); X(vkCmdSetDepthBias); X(vkCmdSetDepthBounds); + X(vkCmdSetEvent); X(vkCmdSetScissor); X(vkCmdSetStencilCompareMask); X(vkCmdSetStencilReference); @@ -76,6 +76,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkCreateDescriptorPool); X(vkCreateDescriptorSetLayout); X(vkCreateDescriptorUpdateTemplateKHR); + X(vkCreateEvent); X(vkCreateFence); X(vkCreateFramebuffer); X(vkCreateGraphicsPipelines); @@ -94,6 +95,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkDestroyDescriptorPool); X(vkDestroyDescriptorSetLayout); X(vkDestroyDescriptorUpdateTemplateKHR); + X(vkDestroyEvent); X(vkDestroyFence); X(vkDestroyFramebuffer); X(vkDestroyImage); @@ -113,10 +115,10 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkFreeMemory); X(vkGetBufferMemoryRequirements); X(vkGetDeviceQueue); + X(vkGetEventStatus); X(vkGetFenceStatus); X(vkGetImageMemoryRequirements); X(vkGetQueryPoolResults); - X(vkGetQueueCheckpointDataNV); X(vkMapMemory); X(vkQueueSubmit); X(vkResetFences); @@ -271,6 +273,10 @@ void Destroy(VkDevice device, VkDeviceMemory handle, const DeviceDispatch& dld) dld.vkFreeMemory(device, handle, nullptr); } +void Destroy(VkDevice device, VkEvent handle, const DeviceDispatch& dld) noexcept { + dld.vkDestroyEvent(device, handle, nullptr); +} + void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept { dld.vkDestroyFence(device, handle, nullptr); } @@ -409,17 +415,6 @@ DebugCallback Instance::TryCreateDebugCallback( return 
DebugCallback(messenger, handle, *dld); } -std::vector<VkCheckpointDataNV> Queue::GetCheckpointDataNV(const DeviceDispatch& dld) const { - if (!dld.vkGetQueueCheckpointDataNV) { - return {}; - } - u32 num; - dld.vkGetQueueCheckpointDataNV(queue, &num, nullptr); - std::vector<VkCheckpointDataNV> checkpoints(num); - dld.vkGetQueueCheckpointDataNV(queue, &num, checkpoints.data()); - return checkpoints; -} - void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const { Check(dld->vkBindBufferMemory(owner, handle, memory, offset)); } @@ -469,12 +464,11 @@ std::vector<VkImage> SwapchainKHR::GetImages() const { } Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci, - Span<const char*> enabled_extensions, - const VkPhysicalDeviceFeatures2& enabled_features, + Span<const char*> enabled_extensions, const void* next, DeviceDispatch& dld) noexcept { VkDeviceCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; - ci.pNext = &enabled_features; + ci.pNext = next; ci.flags = 0; ci.queueCreateInfoCount = queues_ci.size(); ci.pQueueCreateInfos = queues_ci.data(); @@ -613,6 +607,16 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons return ShaderModule(object, handle, *dld); } +Event Device::CreateEvent() const { + VkEventCreateInfo ci; + ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; + ci.pNext = nullptr; + ci.flags = 0; + VkEvent object; + Check(dld->vkCreateEvent(handle, &ci, nullptr, &object)); + return Event(object, handle, *dld); +} + SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const { VkSwapchainKHR object; Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object)); diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h index fb3657819..bda16a2cb 100644 --- a/src/video_core/renderer_vulkan/wrapper.h +++ b/src/video_core/renderer_vulkan/wrapper.h @@ -197,9 +197,9 @@ struct DeviceDispatch : public InstanceDispatch { PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; PFN_vkCmdPushConstants vkCmdPushConstants; PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; - PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV; PFN_vkCmdSetDepthBias vkCmdSetDepthBias; PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; + PFN_vkCmdSetEvent vkCmdSetEvent; PFN_vkCmdSetScissor vkCmdSetScissor; PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; PFN_vkCmdSetStencilReference vkCmdSetStencilReference; @@ -212,6 +212,7 @@ struct DeviceDispatch : public InstanceDispatch { PFN_vkCreateDescriptorPool vkCreateDescriptorPool; PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; + PFN_vkCreateEvent vkCreateEvent; PFN_vkCreateFence vkCreateFence; PFN_vkCreateFramebuffer vkCreateFramebuffer; PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; @@ -230,6 +231,7 @@ struct DeviceDispatch : public InstanceDispatch { PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; + PFN_vkDestroyEvent vkDestroyEvent; PFN_vkDestroyFence vkDestroyFence; PFN_vkDestroyFramebuffer vkDestroyFramebuffer; PFN_vkDestroyImage vkDestroyImage; @@ -249,10 +251,10 @@ struct DeviceDispatch : public InstanceDispatch { PFN_vkFreeMemory vkFreeMemory; PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; PFN_vkGetDeviceQueue vkGetDeviceQueue; + PFN_vkGetEventStatus 
vkGetEventStatus; PFN_vkGetFenceStatus vkGetFenceStatus; PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; PFN_vkGetQueryPoolResults vkGetQueryPoolResults; - PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV; PFN_vkMapMemory vkMapMemory; PFN_vkQueueSubmit vkQueueSubmit; PFN_vkResetFences vkResetFences; @@ -281,6 +283,7 @@ void Destroy(VkDevice, VkDescriptorPool, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkDescriptorSetLayout, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkDescriptorUpdateTemplateKHR, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkDeviceMemory, const DeviceDispatch&) noexcept; +void Destroy(VkDevice, VkEvent, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkFence, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkFramebuffer, const DeviceDispatch&) noexcept; void Destroy(VkDevice, VkImage, const DeviceDispatch&) noexcept; @@ -567,12 +570,8 @@ public: /// Construct a queue handle. constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {} - /// Returns the checkpoint data. - /// @note Returns an empty vector when the function pointer is not present. - std::vector<VkCheckpointDataNV> GetCheckpointDataNV(const DeviceDispatch& dld) const; - - void Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const { - Check(dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence)); + VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept { + return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence); } VkResult Present(const VkPresentInfoKHR& present_info) const noexcept { @@ -654,13 +653,21 @@ public: std::vector<VkImage> GetImages() const; }; +class Event : public Handle<VkEvent, VkDevice, DeviceDispatch> { + using Handle<VkEvent, VkDevice, DeviceDispatch>::Handle; + +public: + VkResult GetStatus() const noexcept { + return dld->vkGetEventStatus(owner, handle); + } +}; + class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> { using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle; public: static Device Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci, - Span<const char*> enabled_extensions, - const VkPhysicalDeviceFeatures2& enabled_features, + Span<const char*> enabled_extensions, const void* next, DeviceDispatch& dld) noexcept; Queue GetQueue(u32 family_index) const noexcept; @@ -702,6 +709,8 @@ public: ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const; + Event CreateEvent() const; + SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const; DeviceMemory TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept; @@ -734,18 +743,11 @@ public: dld->vkResetQueryPoolEXT(handle, query_pool, first, count); } - void GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size, - void* data, VkDeviceSize stride, VkQueryResultFlags flags) const { - Check(dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride, - flags)); - } - - template <typename T> - T GetQueryResult(VkQueryPool query_pool, u32 first, VkQueryResultFlags flags) const { - static_assert(std::is_trivially_copyable_v<T>); - T value; - GetQueryResults(query_pool, first, 1, sizeof(T), &value, sizeof(T), flags); - return value; + VkResult GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size, + void* data, VkDeviceSize stride, VkQueryResultFlags flags) const + noexcept { + return 
dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride, + flags); } }; @@ -920,10 +922,6 @@ public: dld->vkCmdPushConstants(handle, layout, flags, offset, size, values); } - void SetCheckpointNV(const void* checkpoint_marker) const noexcept { - dld->vkCmdSetCheckpointNV(handle, checkpoint_marker); - } - void SetViewport(u32 first, Span<VkViewport> viewports) const noexcept { dld->vkCmdSetViewport(handle, first, viewports.size(), viewports.data()); } @@ -956,6 +954,10 @@ public: dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds); } + void SetEvent(VkEvent event, VkPipelineStageFlags stage_flags) const noexcept { + dld->vkCmdSetEvent(handle, event, stage_flags); + } + void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers, const VkDeviceSize* offsets, const VkDeviceSize* sizes) const noexcept { diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 8112ead3e..9392f065b 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -479,7 +479,7 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock& bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset))); const GlobalMemoryBase descriptor{index, offset}; - const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor); + const auto& entry = used_global_memory.try_emplace(descriptor).first; auto& usage = entry->second; usage.is_written |= is_write; usage.is_read |= is_read; diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index 6c4a1358b..e68f1d305 100644 --- a/src/video_core/shader/decode/texture.cpp +++ b/src/video_core/shader/decode/texture.cpp @@ -139,7 +139,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { } const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); - const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare}; + const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare, false}; const Sampler& sampler = *GetSampler(instr.sampler, info); Node4 values; @@ -171,13 +171,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { const auto coord_count = GetCoordCount(texture_type); Node index_var{}; const Sampler* sampler = - is_bindless ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false}}) - : GetSampler(instr.sampler, {{texture_type, is_array, false}}); + is_bindless + ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false, false}}) + : GetSampler(instr.sampler, {{texture_type, is_array, false, false}}); Node4 values; if (sampler == nullptr) { - for (u32 element = 0; element < values.size(); ++element) { - values[element] = Immediate(0); - } + std::generate(values.begin(), values.end(), [] { return Immediate(0); }); WriteTexInstructionFloat(bb, instr, values); break; } @@ -269,7 +268,6 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { "NDV is not implemented"); auto texture_type = instr.tmml.texture_type.Value(); - const bool is_array = instr.tmml.array != 0; Node index_var{}; const Sampler* sampler = is_bindless ? 
GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler); @@ -593,8 +591,9 @@ Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type, ++parameter_register; } - const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); + const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array, + lod_bias_enabled, 4, 5); + const auto coord_count = std::get<0>(coord_counts); // If enabled arrays index is always stored in the gpr8 field const u64 array_register = instr.gpr8.Value(); // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used @@ -632,8 +631,10 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type, const bool lod_bias_enabled = (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); - const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( - texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); + const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array, + lod_bias_enabled, 4, 4); + const auto coord_count = std::get<0>(coord_counts); + // If enabled arrays index is always stored in the gpr8 field const u64 array_register = instr.gpr8.Value(); // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index 224943ad9..513e9bf49 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -76,12 +76,13 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons s64 cursor) { if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { // Constant buffer found, test if it's an immediate - const auto offset = cbuf->GetOffset(); + const auto& offset = cbuf->GetOffset(); if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { auto track = MakeTrackSampler<BindlessSamplerNode>(cbuf->GetIndex(), immediate->GetValue()); return {tracked, track}; - } else if (const auto operation = std::get_if<OperationNode>(&*offset)) { + } + if (const auto operation = std::get_if<OperationNode>(&*offset)) { const u32 bound_buffer = registry.GetBoundBuffer(); if (bound_buffer != cbuf->GetIndex()) { return {}; @@ -94,12 +95,12 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons const auto offset_inm = std::get_if<ImmediateNode>(&*base_offset); const auto& gpu_driver = registry.AccessGuestDriverProfile(); const u32 bindless_cv = NewCustomVariable(); - const Node op = + Node op = Operation(OperationCode::UDiv, gpr, Immediate(gpu_driver.GetTextureHandlerSize())); const Node cv_node = GetCustomVariable(bindless_cv); Node amend_op = Operation(OperationCode::Assign, cv_node, std::move(op)); - const std::size_t amend_index = DeclareAmend(amend_op); + const std::size_t amend_index = DeclareAmend(std::move(amend_op)); AmendNodeCv(amend_index, code[cursor]); // TODO Implement Bindless Index custom variable auto track = MakeTrackSampler<ArraySamplerNode>(cbuf->GetIndex(), @@ -142,7 +143,7 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co s64 cursor) const { if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { // Constant buffer found, test if it's an immediate - const auto offset = cbuf->GetOffset(); + const auto& offset = cbuf->GetOffset(); if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { return 
{tracked, cbuf->GetIndex(), immediate->GetValue()}; } diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp index e151c26c4..25d2ee2e8 100644 --- a/src/video_core/texture_cache/format_lookup_table.cpp +++ b/src/video_core/texture_cache/format_lookup_table.cpp @@ -196,9 +196,9 @@ std::size_t FormatLookupTable::CalculateIndex(TextureFormat format, bool is_srgb ComponentType alpha_component) noexcept { const auto format_index = static_cast<std::size_t>(format); const auto red_index = static_cast<std::size_t>(red_component); - const auto green_index = static_cast<std::size_t>(red_component); - const auto blue_index = static_cast<std::size_t>(red_component); - const auto alpha_index = static_cast<std::size_t>(red_component); + const auto green_index = static_cast<std::size_t>(green_component); + const auto blue_index = static_cast<std::size_t>(blue_component); + const auto alpha_index = static_cast<std::size_t>(alpha_component); const std::size_t srgb_index = is_srgb ? 1 : 0; return format_index * PerFormat + diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h index c5ab21f56..79e10ffbb 100644 --- a/src/video_core/texture_cache/surface_base.h +++ b/src/video_core/texture_cache/surface_base.h @@ -192,6 +192,22 @@ public: index = index_; } + void SetMemoryMarked(bool is_memory_marked_) { + is_memory_marked = is_memory_marked_; + } + + bool IsMemoryMarked() const { + return is_memory_marked; + } + + void SetSyncPending(bool is_sync_pending_) { + is_sync_pending = is_sync_pending_; + } + + bool IsSyncPending() const { + return is_sync_pending; + } + void MarkAsPicked(bool is_picked_) { is_picked = is_picked_; } @@ -303,6 +319,8 @@ private: bool is_target{}; bool is_registered{}; bool is_picked{}; + bool is_memory_marked{}; + bool is_sync_pending{}; u32 index{NO_RT}; u64 modification_tick{}; }; diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 69ca08fd1..cf6bd005a 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -6,6 +6,7 @@ #include <algorithm> #include <array> +#include <list> #include <memory> #include <mutex> #include <set> @@ -62,6 +63,30 @@ public: } } + void OnCPUWrite(VAddr addr, std::size_t size) { + std::lock_guard lock{mutex}; + + for (const auto& surface : GetSurfacesInRegion(addr, size)) { + if (surface->IsMemoryMarked()) { + UnmarkMemory(surface); + surface->SetSyncPending(true); + marked_for_unregister.emplace_back(surface); + } + } + } + + void SyncGuestHost() { + std::lock_guard lock{mutex}; + + for (const auto& surface : marked_for_unregister) { + if (surface->IsRegistered()) { + surface->SetSyncPending(false); + Unregister(surface); + } + } + marked_for_unregister.clear(); + } + /** * Guarantees that rendertargets don't unregister themselves if the * collide. Protection is currently only done on 3D slices. 
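The OnCPUWrite/SyncGuestHost pair added to the texture cache splits invalidation into two phases: the CPU-write path only unmarks the surface's pages and queues the surface on marked_for_unregister, while the expensive Unregister work is deferred until SyncGuestHost runs. A minimal self-contained sketch of this deferred-unregister pattern follows; the Surface struct and Cache class are illustrative stand-ins for the patch's actual types, and a surface is passed in directly where the real OnCPUWrite looks surfaces up by address range:

    // Illustrative sketch only: simplified types standing in for the cache's
    // real TSurface/texture-cache machinery.
    #include <list>
    #include <memory>
    #include <mutex>

    struct Surface {
        bool memory_marked = true; // pages currently counted by the rasterizer
        bool sync_pending = false; // queued for deferred unregistration
        bool registered = true;
    };
    using TSurface = std::shared_ptr<Surface>;

    class Cache {
    public:
        // CPU-write path: cheap bookkeeping only, no unregistration yet.
        void OnCPUWrite(const TSurface& surface) {
            std::lock_guard lock{mutex};
            if (surface->memory_marked) {
                surface->memory_marked = false; // stop counting the pages now
                surface->sync_pending = true;
                marked_for_unregister.push_back(surface);
            }
        }

        // Later pass: perform the deferred unregistration in one batch.
        void SyncGuestHost() {
            std::lock_guard lock{mutex};
            for (const TSurface& surface : marked_for_unregister) {
                if (surface->registered) {
                    surface->sync_pending = false;
                    surface->registered = false; // stands in for Unregister()
                }
            }
            marked_for_unregister.clear();
        }

    private:
        std::list<TSurface> marked_for_unregister;
        std::mutex mutex;
    };

Keeping the write path down to two flag writes and a list append is what makes it cheap enough to run on every guest CPU write, which is the point of the split.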
@@ -85,10 +110,20 @@ public: return a->GetModificationTick() < b->GetModificationTick(); }); for (const auto& surface : surfaces) { + mutex.unlock(); FlushSurface(surface); + mutex.lock(); } } + bool MustFlushRegion(VAddr addr, std::size_t size) { + std::lock_guard lock{mutex}; + + const auto surfaces = GetSurfacesInRegion(addr, size); + return std::any_of(surfaces.cbegin(), surfaces.cend(), + [](const TSurface& surface) { return surface->IsModified(); }); + } + TView GetTextureSurface(const Tegra::Texture::TICEntry& tic, const VideoCommon::Shader::Sampler& entry) { std::lock_guard lock{mutex}; @@ -206,8 +241,14 @@ public: auto surface_view = GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index), true); - if (render_targets[index].target) - render_targets[index].target->MarkAsRenderTarget(false, NO_RT); + if (render_targets[index].target) { + auto& surface = render_targets[index].target; + surface->MarkAsRenderTarget(false, NO_RT); + const auto& cr_params = surface->GetSurfaceParams(); + if (!cr_params.is_tiled && Settings::values.use_asynchronous_gpu_emulation) { + AsyncFlushSurface(surface); + } + } render_targets[index].target = surface_view.first; render_targets[index].view = surface_view.second; if (render_targets[index].target) @@ -284,6 +325,34 @@ public: return ++ticks; } + void CommitAsyncFlushes() { + committed_flushes.push_back(uncommitted_flushes); + uncommitted_flushes.reset(); + } + + bool HasUncommittedFlushes() const { + return uncommitted_flushes != nullptr; + } + + bool ShouldWaitAsyncFlushes() const { + return !committed_flushes.empty() && committed_flushes.front() != nullptr; + } + + void PopAsyncFlushes() { + if (committed_flushes.empty()) { + return; + } + auto& flush_list = committed_flushes.front(); + if (!flush_list) { + committed_flushes.pop_front(); + return; + } + for (TSurface& surface : *flush_list) { + FlushSurface(surface); + } + committed_flushes.pop_front(); + } + protected: explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, bool is_astc_supported) @@ -345,9 +414,20 @@ protected: surface->SetCpuAddr(*cpu_addr); RegisterInnerCache(surface); surface->MarkAsRegistered(true); + surface->SetMemoryMarked(true); rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1); } + void UnmarkMemory(TSurface surface) { + if (!surface->IsMemoryMarked()) { + return; + } + const std::size_t size = surface->GetSizeInBytes(); + const VAddr cpu_addr = surface->GetCpuAddr(); + rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1); + surface->SetMemoryMarked(false); + } + void Unregister(TSurface surface) { if (guard_render_targets && surface->IsProtected()) { return; @@ -355,9 +435,11 @@ protected: if (!guard_render_targets && surface->IsRenderTarget()) { ManageRenderTargetUnregister(surface); } - const std::size_t size = surface->GetSizeInBytes(); - const VAddr cpu_addr = surface->GetCpuAddr(); - rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1); + UnmarkMemory(surface); + if (surface->IsSyncPending()) { + marked_for_unregister.remove(surface); + surface->SetSyncPending(false); + } UnregisterInnerCache(surface); surface->MarkAsRegistered(false); ReserveSurface(surface->GetSurfaceParams(), surface); @@ -417,7 +499,7 @@ private: **/ RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params, const GPUVAddr gpu_addr, const MatchTopologyResult untopological) { - if (Settings::values.use_accurate_gpu_emulation) { + if (Settings::IsGPULevelExtreme()) { return RecycleStrategy::Flush; } // 
3D Textures decision @@ -461,7 +543,7 @@ private: } switch (PickStrategy(overlaps, params, gpu_addr, untopological)) { case RecycleStrategy::Ignore: { - return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation); + return InitializeSurface(gpu_addr, params, Settings::IsGPULevelExtreme()); } case RecycleStrategy::Flush: { std::sort(overlaps.begin(), overlaps.end(), @@ -509,7 +591,7 @@ private: } const auto& final_params = new_surface->GetSurfaceParams(); if (cr_params.type != final_params.type) { - if (Settings::values.use_accurate_gpu_emulation) { + if (Settings::IsGPULevelExtreme()) { BufferCopy(current_surface, new_surface); } } else { @@ -598,7 +680,7 @@ private: if (passed_tests == 0) { return {}; // In Accurate GPU all tests should pass, else we recycle - } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) { + } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) { return {}; } for (const auto& surface : overlaps) { @@ -668,7 +750,7 @@ private: for (const auto& surface : overlaps) { if (!surface->MatchTarget(params.target)) { if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) { - if (Settings::values.use_accurate_gpu_emulation) { + if (Settings::IsGPULevelExtreme()) { return std::nullopt; } Unregister(surface); @@ -1106,6 +1188,13 @@ private: TView view; }; + void AsyncFlushSurface(TSurface& surface) { + if (!uncommitted_flushes) { + uncommitted_flushes = std::make_shared<std::list<TSurface>>(); + } + uncommitted_flushes->push_back(surface); + } + VideoCore::RasterizerInterface& rasterizer; FormatLookupTable format_lookup_table; @@ -1150,6 +1239,11 @@ private: std::unordered_map<u32, TSurface> invalid_cache; std::vector<u8> invalid_memory; + std::list<TSurface> marked_for_unregister; + + std::shared_ptr<std::list<TSurface>> uncommitted_flushes{}; + std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes; + StagingCache staging_cache; std::recursive_mutex mutex; }; diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 3b9ab38dd..196a3a116 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp @@ -532,6 +532,8 @@ void Config::ReadDebuggingValues() { Settings::values.reporting_services = ReadSetting(QStringLiteral("reporting_services"), false).toBool(); Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool(); + Settings::values.disable_cpu_opt = + ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool(); qt_config->endGroup(); } @@ -637,8 +639,8 @@ void Config::ReadRendererValues() { Settings::values.frame_limit = ReadSetting(QStringLiteral("frame_limit"), 100).toInt(); Settings::values.use_disk_shader_cache = ReadSetting(QStringLiteral("use_disk_shader_cache"), true).toBool(); - Settings::values.use_accurate_gpu_emulation = - ReadSetting(QStringLiteral("use_accurate_gpu_emulation"), false).toBool(); + const int gpu_accuracy_level = ReadSetting(QStringLiteral("gpu_accuracy"), 0).toInt(); + Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level); Settings::values.use_asynchronous_gpu_emulation = ReadSetting(QStringLiteral("use_asynchronous_gpu_emulation"), false).toBool(); Settings::values.use_vsync = ReadSetting(QStringLiteral("use_vsync"), true).toBool(); @@ -1001,6 +1003,7 @@ void Config::SaveDebuggingValues() { WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false); WriteSetting(QStringLiteral("dump_nso"), 
Settings::values.dump_nso, false); WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false); + WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false); qt_config->endGroup(); } @@ -1077,8 +1080,8 @@ void Config::SaveRendererValues() { WriteSetting(QStringLiteral("frame_limit"), Settings::values.frame_limit, 100); WriteSetting(QStringLiteral("use_disk_shader_cache"), Settings::values.use_disk_shader_cache, true); - WriteSetting(QStringLiteral("use_accurate_gpu_emulation"), - Settings::values.use_accurate_gpu_emulation, false); + WriteSetting(QStringLiteral("gpu_accuracy"), static_cast<int>(Settings::values.gpu_accuracy), + 0); WriteSetting(QStringLiteral("use_asynchronous_gpu_emulation"), Settings::values.use_asynchronous_gpu_emulation, false); WriteSetting(QStringLiteral("use_vsync"), Settings::values.use_vsync, true); diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp index 9631059c7..c2026763e 100644 --- a/src/yuzu/configuration/configure_debug.cpp +++ b/src/yuzu/configuration/configure_debug.cpp @@ -36,6 +36,7 @@ void ConfigureDebug::SetConfiguration() { ui->homebrew_args_edit->setText(QString::fromStdString(Settings::values.program_args)); ui->reporting_services->setChecked(Settings::values.reporting_services); ui->quest_flag->setChecked(Settings::values.quest_flag); + ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt); ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn()); ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug); } @@ -48,6 +49,7 @@ void ConfigureDebug::ApplyConfiguration() { Settings::values.program_args = ui->homebrew_args_edit->text().toStdString(); Settings::values.reporting_services = ui->reporting_services->isChecked(); Settings::values.quest_flag = ui->quest_flag->isChecked(); + Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked(); Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked(); Debugger::ToggleConsole(); Log::Filter filter; diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui index e028c4c80..e0d4c4a44 100644 --- a/src/yuzu/configuration/configure_debug.ui +++ b/src/yuzu/configuration/configure_debug.ui @@ -215,6 +215,13 @@ </property> </widget> </item> + <item> + <widget class="QCheckBox" name="disable_cpu_opt"> + <property name="text"> + <string>Disable CPU JIT optimizations</string> + </property> + </widget> + </item> </layout> </widget> </item> diff --git a/src/yuzu/configuration/configure_graphics_advanced.cpp b/src/yuzu/configuration/configure_graphics_advanced.cpp index b9f429f84..0a3f47339 100644 --- a/src/yuzu/configuration/configure_graphics_advanced.cpp +++ b/src/yuzu/configuration/configure_graphics_advanced.cpp @@ -19,7 +19,7 @@ ConfigureGraphicsAdvanced::~ConfigureGraphicsAdvanced() = default; void ConfigureGraphicsAdvanced::SetConfiguration() { const bool runtime_lock = !Core::System::GetInstance().IsPoweredOn(); - ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation); + ui->gpu_accuracy->setCurrentIndex(static_cast<int>(Settings::values.gpu_accuracy)); ui->use_vsync->setEnabled(runtime_lock); ui->use_vsync->setChecked(Settings::values.use_vsync); ui->force_30fps_mode->setEnabled(runtime_lock); @@ -29,7 +29,8 @@ void ConfigureGraphicsAdvanced::SetConfiguration() { } void ConfigureGraphicsAdvanced::ApplyConfiguration() { - 
Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked(); + auto gpu_accuracy = static_cast<Settings::GPUAccuracy>(ui->gpu_accuracy->currentIndex()); + Settings::values.gpu_accuracy = gpu_accuracy; Settings::values.use_vsync = ui->use_vsync->isChecked(); Settings::values.force_30fps_mode = ui->force_30fps_mode->isChecked(); Settings::values.max_anisotropy = ui->anisotropic_filtering_combobox->currentIndex(); diff --git a/src/yuzu/configuration/configure_graphics_advanced.ui b/src/yuzu/configuration/configure_graphics_advanced.ui index 42eec278e..0c7b383e0 100644 --- a/src/yuzu/configuration/configure_graphics_advanced.ui +++ b/src/yuzu/configuration/configure_graphics_advanced.ui @@ -23,11 +23,34 @@ </property> <layout class="QVBoxLayout" name="verticalLayout_3"> <item> - <widget class="QCheckBox" name="use_accurate_gpu_emulation"> - <property name="text"> - <string>Use accurate GPU emulation (slow)</string> - </property> - </widget> + <layout class="QHBoxLayout" name="horizontalLayout_2"> + <item> + <widget class="QLabel" name="label_gpu_accuracy"> + <property name="text"> + <string>Accuracy Level:</string> + </property> + </widget> + </item> + <item> + <widget class="QComboBox" name="gpu_accuracy"> + <item> + <property name="text"> + <string notr="true">Normal</string> + </property> + </item> + <item> + <property name="text"> + <string notr="true">High</string> + </property> + </item> + <item> + <property name="text"> + <string notr="true">Extreme (very slow)</string> + </property> + </item> + </widget> + </item> + </layout> </item> <item> <widget class="QCheckBox" name="use_vsync"> diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 2c8eb481d..05baec7e1 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp @@ -802,10 +802,6 @@ void GMainWindow::ConnectMenuEvents() { connect(ui.action_Load_Folder, &QAction::triggered, this, &GMainWindow::OnMenuLoadFolder); connect(ui.action_Install_File_NAND, &QAction::triggered, this, &GMainWindow::OnMenuInstallToNAND); - connect(ui.action_Select_NAND_Directory, &QAction::triggered, this, - [this] { OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget::NAND); }); - connect(ui.action_Select_SDMC_Directory, &QAction::triggered, this, - [this] { OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget::SDMC); }); connect(ui.action_Exit, &QAction::triggered, this, &QMainWindow::close); connect(ui.action_Load_Amiibo, &QAction::triggered, this, &GMainWindow::OnLoadAmiibo); @@ -940,16 +936,18 @@ bool GMainWindow::LoadROM(const QString& filename) { default: if (static_cast<u32>(result) > static_cast<u32>(Core::System::ResultStatus::ErrorLoader)) { - LOG_CRITICAL(Frontend, "Failed to load ROM!"); const u16 loader_id = static_cast<u16>(Core::System::ResultStatus::ErrorLoader); const u16 error_id = static_cast<u16>(result) - loader_id; + const std::string error_code = fmt::format("({:04X}-{:04X})", loader_id, error_id); + LOG_CRITICAL(Frontend, "Failed to load ROM! {}", error_code); QMessageBox::critical( - this, tr("Error while loading ROM!"), + this, + tr("Error while loading ROM! ").append(QString::fromStdString(error_code)), QString::fromStdString(fmt::format( - "While attempting to load the ROM requested, an error occured. 
Please " - "refer to the yuzu wiki for more information or the yuzu discord for " - "additional help.\n\nError Code: {:04X}-{:04X}\nError Description: {}", - loader_id, error_id, static_cast<Loader::ResultStatus>(error_id)))); + "{}<br>Please follow <a href='https://yuzu-emu.org/help/quickstart/'>the " + "yuzu quickstart guide</a> to redump your files.<br>You can refer " + "to the yuzu wiki</a> or the yuzu Discord</a> for help.", + static_cast<Loader::ResultStatus>(error_id)))); } else { QMessageBox::critical( this, tr("Error while loading ROM!"), @@ -1663,28 +1661,6 @@ void GMainWindow::OnMenuInstallToNAND() { } } -void GMainWindow::OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget target) { - const auto res = QMessageBox::information( - this, tr("Changing Emulated Directory"), - tr("You are about to change the emulated %1 directory of the system. Please note " - "that this does not also move the contents of the previous directory to the " - "new one and you will have to do that yourself.") - .arg(target == EmulatedDirectoryTarget::SDMC ? tr("SD card") : tr("NAND")), - QMessageBox::StandardButtons{QMessageBox::Ok, QMessageBox::Cancel}); - - if (res == QMessageBox::Cancel) - return; - - QString dir_path = QFileDialog::getExistingDirectory(this, tr("Select Directory")); - if (!dir_path.isEmpty()) { - FileUtil::GetUserPath(target == EmulatedDirectoryTarget::SDMC ? FileUtil::UserPath::SDMCDir - : FileUtil::UserPath::NANDDir, - dir_path.toStdString()); - Core::System::GetInstance().GetFileSystemController().CreateFactories(*vfs); - game_list->PopulateAsync(UISettings::values.game_dirs); - } -} - void GMainWindow::OnMenuRecentFile() { QAction* action = qobject_cast<QAction*>(sender()); assert(action); @@ -2095,27 +2071,25 @@ void GMainWindow::OnReinitializeKeys(ReinitializeKeyBehavior behavior) { QString errors; if (!pdm.HasFuses()) { - errors += tr("- Missing fuses - Cannot derive SBK\n"); + errors += tr("Missing fuses"); } if (!pdm.HasBoot0()) { - errors += tr("- Missing BOOT0 - Cannot derive master keys\n"); + errors += tr(" - Missing BOOT0"); } if (!pdm.HasPackage2()) { - errors += tr("- Missing BCPKG2-1-Normal-Main - Cannot derive general keys\n"); + errors += tr(" - Missing BCPKG2-1-Normal-Main"); } if (!pdm.HasProdInfo()) { - errors += tr("- Missing PRODINFO - Cannot derive title keys\n"); + errors += tr(" - Missing PRODINFO"); } if (!errors.isEmpty()) { QMessageBox::warning( - this, tr("Warning Missing Derivation Components"), - tr("The following are missing from your configuration that may hinder key " - "derivation. It will be attempted but may not complete.<br><br>") + - errors + - tr("<br><br>You can get all of these and dump all of your games easily by " - "following <a href='https://yuzu-emu.org/help/quickstart/'>the " - "quickstart guide</a>. Alternatively, you can use another method of dumping " - "to obtain all of your keys.")); + this, tr("Derivation Components Missing"), + tr("Components are missing that may hinder key derivation from completing. 
" + "<br>Please follow <a href='https://yuzu-emu.org/help/quickstart/'>the yuzu " + "quickstart guide</a> to get all your keys and " + "games.<br><br><small>(%1)</small>") + .arg(errors)); } QProgressDialog prog; diff --git a/src/yuzu/main.h b/src/yuzu/main.h index a67125567..0b750689d 100644 --- a/src/yuzu/main.h +++ b/src/yuzu/main.h @@ -196,8 +196,6 @@ private slots: void OnMenuLoadFile(); void OnMenuLoadFolder(); void OnMenuInstallToNAND(); - /// Called whenever a user select the "File->Select -- Directory" where -- is NAND or SD Card - void OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget target); void OnMenuRecentFile(); void OnConfigure(); void OnLoadAmiibo(); diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui index a2c9e4547..ae414241e 100644 --- a/src/yuzu/main.ui +++ b/src/yuzu/main.ui @@ -64,8 +64,6 @@ <addaction name="separator"/> <addaction name="menu_recent_files"/> <addaction name="separator"/> - <addaction name="action_Select_NAND_Directory"/> - <addaction name="action_Select_SDMC_Directory"/> <addaction name="separator"/> <addaction name="action_Load_Amiibo"/> <addaction name="separator"/> @@ -217,22 +215,6 @@ <string>Show Status Bar</string> </property> </action> - <action name="action_Select_NAND_Directory"> - <property name="text"> - <string>Select NAND Directory...</string> - </property> - <property name="toolTip"> - <string>Selects a folder to use as the root of the emulated NAND</string> - </property> - </action> - <action name="action_Select_SDMC_Directory"> - <property name="text"> - <string>Select SD Card Directory...</string> - </property> - <property name="toolTip"> - <string>Selects a folder to use as the root of the emulated SD card</string> - </property> - </action> <action name="action_Fullscreen"> <property name="checkable"> <bool>true</bool> diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp index f4cd905c9..d1ac354bf 100644 --- a/src/yuzu_cmd/config.cpp +++ b/src/yuzu_cmd/config.cpp @@ -388,8 +388,8 @@ void Config::ReadValues() { static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100)); Settings::values.use_disk_shader_cache = sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false); - Settings::values.use_accurate_gpu_emulation = - sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); + const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0); + Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level); Settings::values.use_asynchronous_gpu_emulation = sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false); Settings::values.use_vsync = @@ -425,6 +425,8 @@ void Config::ReadValues() { Settings::values.reporting_services = sdl2_config->GetBoolean("Debugging", "reporting_services", false); Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false); + Settings::values.disable_cpu_opt = + sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false); const auto title_list = sdl2_config->Get("AddOns", "title_ids", ""); std::stringstream ss(title_list); diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h index d63d7a58e..60b1a62fa 100644 --- a/src/yuzu_cmd/default_ini.h +++ b/src/yuzu_cmd/default_ini.h @@ -146,9 +146,9 @@ frame_limit = # 0 (default): Off, 1 : On use_disk_shader_cache = -# Whether to use accurate GPU emulation -# 0 (default): Off (fast), 1 : On (slow) -use_accurate_gpu_emulation = +# Which gpu accuracy level to use +# 0 (Normal), 1 (High), 2 (Extreme) 
+gpu_accuracy = # Whether to use asynchronous GPU emulation # 0 : Off (slow), 1 (default): On (fast) @@ -280,6 +283,9 @@ dump_nso=false # Determines whether or not yuzu will report to the game that the emulated console is in Kiosk Mode # false: Retail/Normal Mode (default), true: Kiosk Mode quest_flag = +# Determines whether or not JIT CPU optimizations are enabled +# false: Optimizations Enabled, true: Optimizations Disabled +disable_cpu_opt = [WebService] # Whether or not to enable telemetry diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp index f2990910e..cb8e68a39 100644 --- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp +++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp @@ -29,6 +29,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen) SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI); SDL_SysWMinfo wm; + SDL_VERSION(&wm.version); if (SDL_GetWindowWMInfo(render_window, &wm) == SDL_FALSE) { LOG_CRITICAL(Frontend, "Failed to get information from the window manager"); std::exit(EXIT_FAILURE); } @@ -70,7 +71,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen) EmuWindow_SDL2_VK::~EmuWindow_SDL2_VK() = default; std::unique_ptr<Core::Frontend::GraphicsContext> EmuWindow_SDL2_VK::CreateSharedContext() const { - return nullptr; + return std::make_unique<DummyContext>(); } void EmuWindow_SDL2_VK::Present() { diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h index b8021ebea..77a6ca72b 100644 --- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h +++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h @@ -22,3 +22,5 @@ public: std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override; }; + +class DummyContext : public Core::Frontend::GraphicsContext {}; diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp index ee2591c8f..c0325cc3c 100644 --- a/src/yuzu_tester/config.cpp +++ b/src/yuzu_tester/config.cpp @@ -126,8 +126,8 @@ void Config::ReadValues() { Settings::values.frame_limit = 100; Settings::values.use_disk_shader_cache = sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false); - Settings::values.use_accurate_gpu_emulation = - sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); + const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0); + Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level); Settings::values.use_asynchronous_gpu_emulation = sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);
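Call sites throughout this change switch from the removed use_accurate_gpu_emulation boolean to Settings::values.gpu_accuracy and the Settings::IsGPULevelExtreme() helper. The settings declarations themselves live in src/core/settings.h, which is outside this excerpt; inferred from the call sites above, they presumably look something like the following sketch rather than the verbatim patch:

    // Hypothetical sketch of the Settings plumbing implied by the call sites
    // above; the real declarations are in src/core/settings.h (not shown here).
    #include <cstdint>

    namespace Settings {

    enum class GPUAccuracy : std::int32_t {
        Normal = 0,  // old use_accurate_gpu_emulation = false (default)
        High = 1,    // new intermediate level
        Extreme = 2, // old use_accurate_gpu_emulation = true ("very slow")
    };

    struct Values {
        GPUAccuracy gpu_accuracy = GPUAccuracy::Normal;
        // ...remaining settings elided...
    };
    inline Values values;

    inline bool IsGPULevelExtreme() {
        return values.gpu_accuracy == GPUAccuracy::Extreme;
    }

    } // namespace Settings

Under that mapping, a stored gpu_accuracy of 2 (Extreme) reproduces the old use_accurate_gpu_emulation=true behavior, while 0 (Normal) keeps the old default.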