Diffstat (limited to 'src/core')
-rw-r--r--   src/core/CMakeLists.txt                      1
-rw-r--r--   src/core/arm/dynarmic/arm_dynarmic.cpp      49
-rw-r--r--   src/core/arm/dynarmic/arm_dynarmic.h        20
-rw-r--r--   src/core/arm/dynarmic/arm_dynarmic_32.cpp    5
-rw-r--r--   src/core/arm/dynarmic/arm_dynarmic_64.cpp    5
-rw-r--r--   src/core/hle/kernel/k_page_table_base.cpp   26
-rw-r--r--   src/core/hle/kernel/k_page_table_base.h      3
-rw-r--r--   src/core/hle/kernel/k_process.cpp            6
-rw-r--r--   src/core/memory.cpp                         86
-rw-r--r--   src/core/memory.h                            7
10 files changed, 171 insertions(+), 37 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..e960edb47 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
target_sources(core PRIVATE
+ arm/dynarmic/arm_dynarmic.cpp
arm/dynarmic/arm_dynarmic.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+ if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+ return;
+ }
+
+ return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+ g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+ g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+ std::call_once(g_registered, [] {
+ struct sigaction sa {};
+ sa.sa_sigaction = &HandleSigSegv;
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+ });
+}
+
+} // namespace Core
+
+#endif
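
The new handler relies on chaining: when the fault is not a separate-heap address, control passes to whatever SIGSEGV handler was installed before. A standalone sketch of that pattern in plain POSIX terms (the fault predicate is a placeholder, and the defensive non-SA_SIGINFO fallback is an addition not present in the patch, which wraps sigaction(2) via Common::SigAction):

    #include <csignal>
    #include <mutex>

    namespace {

    struct sigaction g_prev {};  // handler that was installed before ours
    std::once_flag g_once;

    void Handler(int sig, siginfo_t* info, void* ctx) {
        const bool handled = false;  // placeholder: "fault address belongs to the separate heap"
        if (handled) {
            return;  // mapping was created; the faulting instruction is retried
        }
        if (g_prev.sa_flags & SA_SIGINFO) {
            g_prev.sa_sigaction(sig, info, ctx);  // chain to the previous SA_SIGINFO handler
        } else if (g_prev.sa_handler != SIG_IGN && g_prev.sa_handler != SIG_DFL) {
            g_prev.sa_handler(sig);  // chain to a plain handler
        }
        // SIG_DFL/SIG_IGN: nothing sensible to chain to in this sketch
    }

    } // namespace

    void InstallOnce() {
        std::call_once(g_once, [] {
            struct sigaction sa {};
            sa.sa_sigaction = &Handler;
            sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
            sigaction(SIGSEGV, &sa, &g_prev);
        });
    }
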
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
return static_cast<HaltReason>(hr);
}
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+ explicit ScopedJitExecution(Kernel::KProcess* process);
+ ~ScopedJitExecution();
+ static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+ explicit ScopedJitExecution(Kernel::KProcess* process) {}
+ ~ScopedJitExecution() {}
+ static void RegisterHandler() {}
+};
+
+#endif
+
} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
}
HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
m_jit = MakeJit(&page_table_impl);
+ ScopedJitExecution::RegisterHandler();
}
ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
}
HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Run());
}
HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+ ScopedJitExecution sj(thread->GetOwnerProcess());
+
m_jit->ClearExclusiveState();
return TranslateHaltReason(m_jit->Step());
}
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
auto& page_table = process->GetPageTable().GetBasePageTable();
auto& page_table_impl = page_table.GetImpl();
m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+ ScopedJitExecution::RegisterHandler();
}
ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
void KPageTableBase::Finalize() {
auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
if (Settings::IsFastmemEnabled()) {
- m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+ m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
}
};
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
cur_pages, 0, false, unmap_properties,
- OperationType::Unmap, true));
+ OperationType::UnmapPhysical, true));
}
// Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
// Map the pages.
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
cur_pg, map_properties,
- OperationType::MapFirstGroup, false));
+ OperationType::MapFirstGroupPhysical, false));
}
}
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
// Unmap.
R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
- unmap_properties, OperationType::Unmap, false));
+ unmap_properties, OperationType::UnmapPhysical, false));
}
// Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
// or free them to the page list, and so it goes unused (along with page properties).
switch (operation) {
- case OperationType::Unmap: {
+ case OperationType::Unmap:
+ case OperationType::UnmapPhysical: {
+ const bool separate_heap = operation == OperationType::UnmapPhysical;
+
// Ensure that any pages we track are closed on exit.
KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
this->MakePageGroup(pages_to_close, virt_addr, num_pages);
// Unmap.
- m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+ m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
R_SUCCEED();
}
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
ASSERT(virt_addr != 0);
ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
- ConvertToMemoryPermission(properties.perm));
+ ConvertToMemoryPermission(properties.perm), false);
// Open references to pages, if we should.
if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
switch (operation) {
case OperationType::MapGroup:
- case OperationType::MapFirstGroup: {
+ case OperationType::MapFirstGroup:
+ case OperationType::MapFirstGroupPhysical: {
+ const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
// We want to maintain a new reference to every page in the group.
- KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+ KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
for (const auto& node : page_group) {
const size_t size{node.GetNumPages() * PageSize};
// Map the pages.
m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
- ConvertToMemoryPermission(properties.perm));
+ ConvertToMemoryPermission(properties.perm), separate_heap);
virt_addr += size;
}
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
ChangePermissionsAndRefresh = 5,
ChangePermissionsAndRefreshAndFlush = 6,
Separate = 7,
+
+ MapFirstGroupPhysical = 65000,
+ UnmapPhysical = 65001,
};
static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
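
The two new enumerators sit far outside the range of the existing operation types; they exist only so Operate() can tell physical-memory map/unmap requests apart and route them through the separate-heap path shown in k_page_table_base.cpp above. A hypothetical helper (not part of the patch, which inlines this check instead) spelling that out:

    constexpr bool IsSeparateHeapOperation(OperationType op) {
        return op == OperationType::MapFirstGroupPhysical ||
               op == OperationType::UnmapPhysical;
    }
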
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
auto& buffer = m_kernel.System().DeviceMemory().buffer;
const auto& code = code_set.CodeSegment();
const auto& patch = code_set.PatchSegment();
- buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
- buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+ buffer.Protect(GetInteger(base_addr + code.addr), code.size,
+ Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
+ buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
+ Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
}
#endif
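
Protect() now takes a Common::MemoryPermission bitmask instead of three read/write/execute booleans. A small sketch (assuming MemoryPermission is a flag enum with the usual bitwise operators, as its use elsewhere in this patch suggests) of how the old boolean triple maps onto the new form:

    // Hypothetical helper, not in the patch: convert the old (r, w, x) booleans
    // into the bitmask the new Protect() overload expects.
    Common::MemoryPermission ToPermission(bool r, bool w, bool x) {
        Common::MemoryPermission perm{};
        if (r) { perm |= Common::MemoryPermission::Read; }
        if (w) { perm |= Common::MemoryPermission::Write; }
        if (x) { perm |= Common::MemoryPermission::Execute; }
        return perm;
    }

Note that the call sites above now request only Read | Execute for the code and patch segments, whereas the old calls passed the write flag as well.
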
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
#include "common/assert.h"
#include "common/atomic_ops.h"
#include "common/common_types.h"
+#include "common/heap_tracker.h"
#include "common/logging/log.h"
#include "common/page_table.h"
#include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
} else {
current_page_table->fastmem_arena = nullptr;
}
+
+#ifdef __linux__
+ heap_tracker.emplace(system.DeviceMemory().buffer);
+ buffer = std::addressof(*heap_tracker);
+#else
+ buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
}
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms) {
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
Common::PageType::Memory);
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Map(GetInteger(base),
- GetInteger(target) - DramMemoryMap::Base, size, perms);
+ buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+ separate_heap);
}
}
- void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap) {
ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
Common::PageType::Unmapped);
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+ buffer->Unmap(GetInteger(base), size, separate_heap);
}
}
@@ -89,11 +99,6 @@ struct Memory::Impl {
return;
}
- const bool is_r = True(perms & Common::MemoryPermission::Read);
- const bool is_w = True(perms & Common::MemoryPermission::Write);
- const bool is_x =
- True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
u64 protect_bytes{};
u64 protect_begin{};
for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
switch (page_type) {
case Common::PageType::RasterizerCachedMemory:
if (protect_bytes > 0) {
- system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
- is_x);
+ buffer->Protect(protect_begin, protect_bytes, perms);
protect_bytes = 0;
}
break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
}
if (protect_bytes > 0) {
- system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+ buffer->Protect(protect_begin, protect_bytes, perms);
}
}
@@ -486,7 +490,9 @@ struct Memory::Impl {
}
if (current_page_table->fastmem_arena) {
- system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+ const auto perm{debug ? Common::MemoryPermission{}
+ : Common::MemoryPermission::ReadWrite};
+ buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
}
if (current_page_table->fastmem_arena) {
- const bool is_read_enable =
- !Settings::values.use_reactive_flushing.GetValue() || !cached;
- system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+ Common::MemoryPermission perm{};
+ if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+ perm |= Common::MemoryPermission::Read;
+ }
+ if (!cached) {
+ perm |= Common::MemoryPermission::Write;
+ }
+ buffer->Protect(vaddr, size, perm);
}
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
std::mutex sys_core_guard;
+
+ std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+ Common::HeapTracker* buffer{};
+#else
+ Common::HostMemory* buffer{};
+#endif
};
Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
}
void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms) {
- impl->MapMemoryRegion(page_table, base, size, target, perms);
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap) {
+ impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
}
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
- impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap) {
+ impl->UnmapRegion(page_table, base, size, separate_heap);
}
void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
}
bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
- bool mapped = true;
+ [[maybe_unused]] bool mapped = true;
+ [[maybe_unused]] bool rasterizer = false;
+
u8* const ptr = impl->GetPointerImpl(
GetInteger(vaddr),
[&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
GetInteger(vaddr));
mapped = false;
},
- [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+ [&] {
+ impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+ rasterizer = true;
+ });
+
+#ifdef __linux__
+ if (!rasterizer && mapped) {
+ impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+ }
+#endif
+
return mapped && ptr != nullptr;
}
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+ return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+ return false;
+#endif
+}
+
} // namespace Core::Memory
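
Putting the memory.cpp pieces together: on Linux the fastmem arena is wrapped in a Common::HeapTracker, separate-heap pages are mapped lazily, and the SIGSEGV handler from arm_dynarmic.cpp resolves the resulting faults through the new InvalidateSeparateHeap() entry point. A minimal sketch of that resolution step (assumes the Linux/HeapTracker path is active; the helper name is illustrative):

    // Sketch: what the signal handler effectively asks of Memory on a fault.
    // Returns true when the address belonged to the separate heap and a backing
    // mapping was created, so the faulting instruction can simply be retried.
    bool TryResolveSeparateHeapFault(Core::Memory::Memory& memory, void* fault_address) {
        return memory.InvalidateSeparateHeap(fault_address);
    }
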
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
* @param perms The permissions to map the memory with.
*/
void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
- Common::PhysicalAddress target, Common::MemoryPermission perms);
+ Common::PhysicalAddress target, Common::MemoryPermission perms,
+ bool separate_heap);
/**
* Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
* @param base The address to begin unmapping at.
* @param size The amount of bytes to unmap.
*/
- void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+ void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+ bool separate_heap);
/**
* Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+ bool InvalidateSeparateHeap(void* fault_address);
void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
private: