Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  102
1 file changed, 90 insertions, 12 deletions
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa5273402..5b376b202 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -1,8 +1,10 @@
// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-FileCopyrightText: 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cstring>
+#include <mutex>
#include <span>
#include "common/assert.h"
@@ -10,6 +12,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/page_table.h"
+#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/swap.h"
#include "core/core.h"
@@ -41,7 +44,7 @@ struct Memory::Impl {
explicit Impl(Core::System& system_) : system{system_} {}
void SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
- current_page_table = &process.GetPageTable().PageTableImpl();
+ current_page_table = &process.GetPageTable().GetImpl();
current_page_table->fastmem_arena = system.DeviceMemory().buffer.VirtualBasePointer();
const std::size_t address_space_width = process.GetPageTable().GetAddressSpaceWidth();
@@ -50,7 +53,7 @@ struct Memory::Impl {
}
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target) {
+ Common::PhysicalAddress target, Common::MemoryPermission perms) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -60,7 +63,7 @@ struct Memory::Impl {
if (Settings::IsFastmemEnabled()) {
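+ // Mirror the guest permissions onto the host fastmem mapping.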
system.DeviceMemory().buffer.Map(GetInteger(base),
- GetInteger(target) - DramMemoryMap::Base, size);
+ GetInteger(target) - DramMemoryMap::Base, size, perms);
}
}
@@ -75,6 +78,51 @@ struct Memory::Impl {
}
}
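+ // Applies perms to the host fastmem arena for [vaddr, vaddr + size),
+ // leaving rasterizer-cached pages untouched so their traps stay armed.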
+ void ProtectRegion(Common::PageTable& page_table, VAddr vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((vaddr & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", vaddr);
+
+ if (!Settings::IsFastmemEnabled()) {
+ return;
+ }
+
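+ // Decompose the requested permissions; execute is honored only when
+ // NCE (native code execution) is active.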
+ const bool is_r = True(perms & Common::MemoryPermission::Read);
+ const bool is_w = True(perms & Common::MemoryPermission::Write);
+ const bool is_x =
+ True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
+
+ if (!current_page_table) {
+ system.DeviceMemory().buffer.Protect(vaddr, size, is_r, is_w, is_x);
+ return;
+ }
+
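+ // Walk the page table, coalescing runs of contiguous non-rasterizer
+ // pages into a single host Protect() call.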
+ u64 protect_bytes{};
+ u64 protect_begin{};
+ for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
+ const Common::PageType page_type{
+ current_page_table->pointers[addr >> YUZU_PAGEBITS].Type()};
+ switch (page_type) {
+ case Common::PageType::RasterizerCachedMemory:
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
+ is_x);
+ protect_bytes = 0;
+ }
+ break;
+ default:
+ if (protect_bytes == 0) {
+ protect_begin = addr;
+ }
+ protect_bytes += YUZU_PAGESIZE;
+ }
+ }
+
+ if (protect_bytes > 0) {
+ system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+ }
+ }
+
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
const Common::PhysicalAddress paddr{
current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
@@ -195,7 +243,7 @@ struct Memory::Impl {
bool WalkBlock(const Common::ProcessAddress addr, const std::size_t size, auto on_unmapped,
auto on_memory, auto on_rasterizer, auto increment) {
- const auto& page_table = system.ApplicationProcess()->GetPageTable().PageTableImpl();
+ const auto& page_table = system.ApplicationProcess()->GetPageTable().GetImpl();
std::size_t remaining_size = size;
std::size_t page_index = addr >> YUZU_PAGEBITS;
std::size_t page_offset = addr & YUZU_PAGEMASK;
@@ -318,7 +366,7 @@ struct Memory::Impl {
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
u8* const host_ptr) {
if constexpr (!UNSAFE) {
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
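+ // Route the write through the per-core dirty tracking rather than
+ // invalidating the GPU region directly.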
+ HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
}
std::memcpy(host_ptr, src_buffer, copy_amount);
},
@@ -351,7 +399,7 @@ struct Memory::Impl {
},
[&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
u8* const host_ptr) {
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
+ HandleRasterizerWrite(GetInteger(current_vaddr), copy_amount);
std::memset(host_ptr, 0, copy_amount);
},
[](const std::size_t copy_amount) {});
@@ -420,7 +468,7 @@ struct Memory::Impl {
const std::size_t block_size) {
// dc cvac: Store to point of coherency
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
+ HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(dest_addr, size, on_rasterizer);
}
@@ -430,7 +478,7 @@ struct Memory::Impl {
const std::size_t block_size) {
// dc civac: Store to point of coherency, and invalidate from cache
// CPU flush -> GPU invalidate
- system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
+ HandleRasterizerWrite(GetInteger(current_vaddr), block_size);
};
return PerformCacheOperation(dest_addr, size, on_rasterizer);
}
@@ -767,7 +815,18 @@ struct Memory::Impl {
}
void HandleRasterizerWrite(VAddr address, size_t size) {
- const size_t core = system.GetCurrentHostThreadID();
+ constexpr size_t sys_core = Core::Hardware::NUM_CPU_CORES - 1;
+ const size_t core = std::min(system.GetCurrentHostThreadID(),
+ sys_core); // writes from any other host thread are routed to sys_core.
+ // Guard on sys_core, which several host threads may share.
+ if (core == sys_core) [[unlikely]] {
+ sys_core_guard.lock();
+ }
+ SCOPE_EXIT({
+ if (core == sys_core) [[unlikely]] {
+ sys_core_guard.unlock();
+ }
+ });
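+ // Batch writes that land on the same page as this core's previous write.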
auto& current_area = rasterizer_write_areas[core];
VAddr subaddress = address >> YUZU_PAGEBITS;
bool do_collection = current_area.last_address == subaddress;
@@ -799,6 +858,7 @@ struct Memory::Impl {
rasterizer_read_areas{};
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
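+ // Serializes dirty-state updates on sys_core, which all host threads
+ // not bound to a guest core fall back to.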
+ std::mutex sys_core_guard;
};
Memory::Memory(Core::System& system_) : system{system_} {
@@ -816,17 +876,22 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target) {
- impl->MapMemoryRegion(page_table, base, size, target);
+ Common::PhysicalAddress target, Common::MemoryPermission perms) {
+ impl->MapMemoryRegion(page_table, base, size, target, perms);
}
void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
impl->UnmapRegion(page_table, base, size);
}
+void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
+ Common::MemoryPermission perms) {
+ impl->ProtectRegion(page_table, GetInteger(vaddr), size, perms);
+}
+
bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
const Kernel::KProcess& process = *system.ApplicationProcess();
- const auto& page_table = process.GetPageTable().PageTableImpl();
+ const auto& page_table = process.GetPageTable().GetImpl();
const size_t page = vaddr >> YUZU_PAGEBITS;
if (page >= page_table.pointers.size()) {
return false;
@@ -986,4 +1051,17 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
impl->FlushRegion(dest_addr, size);
}
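+// Invalidates any GPU-cached copy of [vaddr, vaddr + size) on behalf of NCE;
+// returns false when the address cannot be resolved to a host pointer.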
+bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
+ bool mapped = true;
+ u8* const ptr = impl->GetPointerImpl(
+ GetInteger(vaddr),
+ [&] {
+ LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
+ GetInteger(vaddr));
+ mapped = false;
+ },
+ [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+ return mapped && ptr != nullptr;
+}
+
} // namespace Core::Memory