Diffstat:
 src/core/arm/dynarmic/arm_dynarmic_32.cpp |  6
 src/core/arm/dynarmic/arm_dynarmic_64.cpp | 12
 src/core/device_memory.cpp                |  2
 src/core/device_memory.h                  | 17
 src/core/memory.cpp                       | 18
 5 files changed, 45 insertions(+), 10 deletions(-)
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index cea7f0fb1..c8f6dc765 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -128,6 +128,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     if (page_table) {
         config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
             page_table->pointers.data());
+        config.fastmem_pointer = page_table->fastmem_arena;
     }
     config.absolute_offset_page_table = true;
     config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
@@ -143,7 +144,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
 
     // Code cache size
     config.code_cache_size = 512 * 1024 * 1024;
-    config.far_code_offset = 256 * 1024 * 1024;
+    config.far_code_offset = 400 * 1024 * 1024;
 
     // Safe optimizations
     if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::DebugMode) {
@@ -171,6 +172,9 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
         if (!Settings::values.cpuopt_reduce_misalign_checks) {
             config.only_detect_misalignment_via_page_table_on_page_boundary = false;
         }
+        if (!Settings::values.cpuopt_fastmem) {
+            config.fastmem_pointer = nullptr;
+        }
     }
 
     // Unsafe optimizations
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 63193dcb1..ba524cd05 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -160,6 +160,10 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         config.absolute_offset_page_table = true;
         config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
         config.only_detect_misalignment_via_page_table_on_page_boundary = true;
+
+        config.fastmem_pointer = page_table->fastmem_arena;
+        config.fastmem_address_space_bits = address_space_bits;
+        config.silently_mirror_fastmem = false;
     }
 
     // Multi-process state
@@ -181,7 +185,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
 
     // Code cache size
     config.code_cache_size = 512 * 1024 * 1024;
-    config.far_code_offset = 256 * 1024 * 1024;
+    config.far_code_offset = 400 * 1024 * 1024;
 
     // Safe optimizations
     if (Settings::values.cpu_accuracy.GetValue() == Settings::CPUAccuracy::DebugMode) {
@@ -209,6 +213,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         if (!Settings::values.cpuopt_reduce_misalign_checks) {
             config.only_detect_misalignment_via_page_table_on_page_boundary = false;
         }
+        if (!Settings::values.cpuopt_fastmem) {
+            config.fastmem_pointer = nullptr;
+        }
     }
 
     // Unsafe optimizations
@@ -223,6 +230,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable*
         if (Settings::values.cpuopt_unsafe_inaccurate_nan.GetValue()) {
             config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
         }
+        if (Settings::values.cpuopt_unsafe_fastmem_check.GetValue()) {
+            config.fastmem_address_space_bits = 64;
+        }
     }
 
     return std::make_shared<Dynarmic::A64::Jit>(config);
diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp
index 0c4b440ed..f19c0515f 100644
--- a/src/core/device_memory.cpp
+++ b/src/core/device_memory.cpp
@@ -6,7 +6,7 @@
 namespace Core {
 
-DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size} {}
+DeviceMemory::DeviceMemory() : buffer{DramMemoryMap::Size, 1ULL << 39} {}
 
 DeviceMemory::~DeviceMemory() = default;
 
 } // namespace Core
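For orientation, the sketch below models what the fastmem fields configured above mean to the JIT. It is not dynarmic code; FastmemConfig and FastmemRead32 are made-up names for this illustration, and the toy buffer stands in for the 1ULL << 39 byte host reservation created in DeviceMemory. A guest access compiles to a single host load at fastmem_pointer + vaddr; with silently_mirror_fastmem = false, addresses that do not fit in fastmem_address_space_bits must fall back to the page-table path, and the unsafe cpuopt_unsafe_fastmem_check option removes that guard by widening the bit count to 64.

// Standalone illustration only -- not dynarmic's emitter. FastmemConfig and
// FastmemRead32 are invented names for this sketch.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

struct FastmemConfig {
    const std::uint8_t* fastmem_pointer = nullptr; // host base of the arena
    unsigned fastmem_address_space_bits = 39;      // guest address width
    bool silently_mirror_fastmem = false;          // wrap out-of-range addresses?
};

// Models the fast path: one host load at arena + vaddr. Returns nullopt where the
// real JIT would have to take the slow (page-table) path instead.
std::optional<std::uint32_t> FastmemRead32(const FastmemConfig& cfg, std::uint64_t vaddr) {
    if (cfg.fastmem_pointer == nullptr) {
        return std::nullopt; // fastmem disabled, e.g. cpuopt_fastmem == false
    }
    const std::uint64_t mask = (std::uint64_t{1} << cfg.fastmem_address_space_bits) - 1;
    if (!cfg.silently_mirror_fastmem && (vaddr & ~mask) != 0) {
        return std::nullopt; // address outside the arena: bounds check required
    }
    std::uint32_t value{};
    std::memcpy(&value, cfg.fastmem_pointer + (vaddr & mask), sizeof(value));
    return value;
}

int main() {
    // Tiny stand-in arena instead of a 2^39-byte reservation.
    std::vector<std::uint8_t> arena(4096, 0);
    arena[0x100] = 0x2a;

    const FastmemConfig cfg{arena.data(), 12, false}; // 12 bits to match the toy arena
    const auto value = FastmemRead32(cfg, 0x100);
    return (value && *value == 0x2a) ? 0 : 1;
}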
diff --git a/src/core/device_memory.h b/src/core/device_memory.h
index 5b1ae28f3..c4d17705f 100644
--- a/src/core/device_memory.h
+++ b/src/core/device_memory.h
@@ -5,7 +5,7 @@
 #pragma once
 
 #include "common/common_types.h"
-#include "common/virtual_buffer.h"
+#include "common/host_memory.h"
 
 namespace Core {
 
@@ -21,27 +21,30 @@ enum : u64 {
 };
 }; // namespace DramMemoryMap
 
-class DeviceMemory : NonCopyable {
+class DeviceMemory {
 public:
     explicit DeviceMemory();
     ~DeviceMemory();
 
+    DeviceMemory& operator=(const DeviceMemory&) = delete;
+    DeviceMemory(const DeviceMemory&) = delete;
+
     template <typename T>
     PAddr GetPhysicalAddr(const T* ptr) const {
-        return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) +
+        return (reinterpret_cast<uintptr_t>(ptr) -
+                reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
                DramMemoryMap::Base;
     }
 
     u8* GetPointer(PAddr addr) {
-        return buffer.data() + (addr - DramMemoryMap::Base);
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
    }
 
     const u8* GetPointer(PAddr addr) const {
-        return buffer.data() + (addr - DramMemoryMap::Base);
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
     }
 
-private:
-    Common::VirtualBuffer<u8> buffer;
+    Common::HostMemory buffer;
 };
 
 } // namespace Core
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9857278f6..f285c6f63 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
+#include "common/settings.h"
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
@@ -32,6 +33,7 @@ struct Memory::Impl {
 
     void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
         current_page_table = &process.PageTable().PageTableImpl();
+        current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
 
         const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth();
 
@@ -41,13 +43,23 @@ struct Memory::Impl {
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG(target >= DramMemoryMap::Base && target < DramMemoryMap::End,
+                   "Out of bounds target: {:016X}", target);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+
+        if (Settings::IsFastmemEnabled()) {
+            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
+        }
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
         ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
         MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+
+        if (Settings::IsFastmemEnabled()) {
+            system.DeviceMemory().buffer.Unmap(base, size);
+        }
     }
 
     bool IsValidVirtualAddress(const Kernel::KProcess& process, const VAddr vaddr) const {
@@ -466,6 +478,12 @@ struct Memory::Impl {
         if (vaddr == 0) {
             return;
         }
+
+        if (Settings::IsFastmemEnabled()) {
+            const bool is_read_enable = Settings::IsGPULevelHigh() || !cached;
+            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+        }
+
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
         // address space, marking the region as un/cached. The region is marked un/cached at a
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
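The memory.cpp hooks above keep that arena coherent with the guest page table: MapMemoryRegion aliases the DRAM slice at target - DramMemoryMap::Base into the arena at the guest base address, UnmapRegion drops the alias, and the Protect call downgrades permissions on cached regions so accesses there leave the fast path. The following is a hedged, Linux-only sketch of the double-mapping technique such an arena can be built on; it is not the Common::HostMemory implementation, only a demonstration that a backing view and an arena view can alias the same physical pages.

// Illustrative only: memfd_create + two mmaps give one physical backing with two
// virtual views, roughly analogous to BackingBasePointer() and VirtualBasePointer().
// Linux-specific; requires glibc's memfd_create (g++ defines _GNU_SOURCE by default).
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

int main() {
    const std::size_t size = 1 << 20; // 1 MiB backing store for the demo

    // Anonymous shared-memory object standing in for emulated DRAM.
    const int fd = memfd_create("demo_dram", 0);
    if (fd < 0 || ftruncate(fd, static_cast<off_t>(size)) != 0) {
        return 1;
    }

    // Backing view: always mapped read/write (compare BackingBasePointer()).
    auto* backing = static_cast<unsigned char*>(
        mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));

    // Arena view: reserve address space with PROT_NONE (compare VirtualBasePointer()).
    auto* arena = static_cast<unsigned char*>(
        mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    if (backing == MAP_FAILED || arena == MAP_FAILED) {
        return 1;
    }

    // Alias a DRAM slice into the reservation (compare Map(vaddr, paddr_offset, size));
    // Unmap and Protect would be further mmap/mprotect calls on the same range.
    if (mmap(arena, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0) ==
        MAP_FAILED) {
        return 1;
    }

    backing[0x42] = 0x7f;                       // write through the backing view...
    const bool aliased = (arena[0x42] == 0x7f); // ...observe it through the arena view

    munmap(arena, size);
    munmap(backing, size);
    close(fd);
    return aliased ? 0 : 1;
}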