Diffstat
 src/core/arm/dynarmic/arm_dynarmic_32.cpp |   1
 src/core/arm/dynarmic/arm_dynarmic_64.cpp |   1
 src/core/hle/kernel/memory/page_table.cpp |   2
 src/core/memory.cpp                       | 187
 4 files changed, 67 insertions(+), 124 deletions(-)
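
The change below merges the page type attribute into the page-table pointer array: each entry now packs a host pointer and its Common::PageType into a single word, so readers sample pointer and type together and the rasterizer_cache_guard mutex in memory.cpp can be dropped. The following is a minimal sketch of such a packed entry, assuming the type occupies the pointer's low ATTRIBUTE_BITS; the bit count and the interface shown here are assumptions for illustration, not the actual Common::PageTable::PageInfo implementation.

#include <cstdint>
#include <utility>

enum class PageType : std::uintptr_t { Unmapped = 0, Memory = 1, RasterizerCachedMemory = 2 };

class PackedPageEntry {
public:
    // Assumed for this sketch: stored pointers are page-aligned (and biased by a
    // page-aligned vaddr), so their low ATTRIBUTE_BITS are zero and can hold the type.
    static constexpr std::uintptr_t ATTRIBUTE_BITS = 2; // assumed value
    static constexpr std::uintptr_t ATTRIBUTE_MASK = (std::uintptr_t{1} << ATTRIBUTE_BITS) - 1;

    void Store(std::uint8_t* pointer, PageType type) {
        raw = reinterpret_cast<std::uintptr_t>(pointer) | static_cast<std::uintptr_t>(type);
    }
    std::uint8_t* Pointer() const { return ExtractPointer(raw); }
    PageType Type() const { return ExtractType(raw); }
    std::pair<std::uint8_t*, PageType> PointerType() const { return {Pointer(), Type()}; }
    std::uintptr_t Raw() const { return raw; }

    static std::uint8_t* ExtractPointer(std::uintptr_t raw_value) {
        return reinterpret_cast<std::uint8_t*>(raw_value & ~ATTRIBUTE_MASK);
    }
    static PageType ExtractType(std::uintptr_t raw_value) {
        return static_cast<PageType>(raw_value & ATTRIBUTE_MASK);
    }

private:
    std::uintptr_t raw = 0;
};

With an entry shaped like this, the fast paths in Read/Write in the memory.cpp hunks load Raw() once and then branch on ExtractPointer/ExtractType from that single value.
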
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index e9c74b1a6..8aaf11eee 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -133,6 +133,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
page_table.pointers.data());
config.absolute_offset_page_table = true;
+ config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 7a4eb88a2..d2e1dc724 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -152,6 +152,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
// Memory
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
config.page_table_address_space_bits = address_space_bits;
+ config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
config.silently_mirror_page_table = false;
config.absolute_offset_page_table = true;
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
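
Both dynarmic configs gain page_table_pointer_mask_bits because the JIT walks this same page-table array directly and now has to strip the low attribute bits before using an entry as a host pointer. Conceptually (an illustration only, not dynarmic's actual generated code):

#include <cstddef>
#include <cstdint>

// Illustrative: clear the low mask_bits of a raw page-table entry so the
// remainder can be dereferenced as a host pointer, mirroring what
// page_table_pointer_mask_bits asks the JIT to do.
inline std::uint8_t* ResolveMaskedEntry(std::uintptr_t raw_entry, std::size_t mask_bits) {
    const std::uintptr_t mask = (std::uintptr_t{1} << mask_bits) - 1;
    return reinterpret_cast<std::uint8_t*>(raw_entry & ~mask);
}
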
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index f53a7be82..f3e8bc333 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
physical_memory_usage = 0;
memory_pool = pool;
- page_table_impl.Resize(address_space_width, PageBits, true);
+ page_table_impl.Resize(address_space_width, PageBits);
return InitializeMemoryLayout(start, end);
}
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 54a848936..f209c4949 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -4,7 +4,6 @@
#include <algorithm>
#include <cstring>
-#include <mutex>
#include <optional>
#include <utility>
@@ -68,21 +67,8 @@ struct Memory::Impl {
bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
const auto& page_table = process.PageTable().PageTableImpl();
-
- const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
- if (page_pointer != nullptr) {
- return true;
- }
-
- if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) {
- return true;
- }
-
- if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) {
- return false;
- }
-
- return false;
+ const auto [pointer, type] = page_table.pointers[vaddr >> PAGE_BITS].PointerType();
+ return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory;
}
bool IsValidVirtualAddress(VAddr vaddr) const {
@@ -100,17 +86,15 @@ struct Memory::Impl {
}
u8* GetPointer(const VAddr vaddr) const {
- u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]};
- if (page_pointer) {
- return page_pointer + vaddr;
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
+ return pointer + vaddr;
}
-
- if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
- Common::PageType::RasterizerCachedMemory) {
+ const auto type = Common::PageTable::PageInfo::ExtractType(raw_pointer);
+ if (type == Common::PageType::RasterizerCachedMemory) {
return GetPointerFromRasterizerCachedMemory(vaddr);
}
-
- return {};
+ return nullptr;
}
u8 Read8(const VAddr addr) {
@@ -222,7 +206,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -231,10 +216,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
-
- const u8* const src_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_buffer, src_ptr, copy_amount);
break;
}
@@ -268,7 +251,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -277,10 +261,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
-
- const u8* const src_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ const u8* const src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_buffer, src_ptr, copy_amount);
break;
}
@@ -320,7 +302,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -328,10 +311,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
-
- u8* const dest_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_ptr, src_buffer, copy_amount);
break;
}
@@ -364,7 +345,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -372,10 +354,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
-
- u8* const dest_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
std::memcpy(dest_ptr, src_buffer, copy_amount);
break;
}
@@ -414,7 +394,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -422,10 +403,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
-
- u8* dest_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ u8* const dest_ptr = pointer + page_offset + (page_index << PAGE_BITS);
std::memset(dest_ptr, 0, copy_amount);
break;
}
@@ -461,7 +440,8 @@ struct Memory::Impl {
std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
- switch (page_table.attributes[page_index]) {
+ const auto [pointer, type] = page_table.pointers[page_index].PointerType();
+ switch (type) {
case Common::PageType::Unmapped: {
LOG_ERROR(HW_Memory,
"Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
@@ -470,9 +450,8 @@ struct Memory::Impl {
break;
}
case Common::PageType::Memory: {
- DEBUG_ASSERT(page_table.pointers[page_index]);
- const u8* src_ptr =
- page_table.pointers[page_index] + page_offset + (page_index << PAGE_BITS);
+ DEBUG_ASSERT(pointer);
+ const u8* src_ptr = pointer + page_offset + (page_index << PAGE_BITS);
WriteBlock(process, dest_addr, src_ptr, copy_amount);
break;
}
@@ -498,34 +477,19 @@ struct Memory::Impl {
return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size);
}
- struct PageEntry {
- u8* const pointer;
- const Common::PageType attribute;
- };
-
- PageEntry SafePageEntry(std::size_t base) const {
- std::lock_guard lock{rasterizer_cache_guard};
- return {
- .pointer = current_page_table->pointers[base],
- .attribute = current_page_table->attributes[base],
- };
- }
-
void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
- std::lock_guard lock{rasterizer_cache_guard};
if (vaddr == 0) {
return;
}
-
// Iterate over a contiguous CPU address space, which corresponds to the specified GPU
// address space, marking the region as un/cached. The region is marked un/cached at a
// granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
// is different). This assumes the specified GPU address region is contiguous as well.
- u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
- for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
- Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]};
-
+ const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+ for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+ const Common::PageType page_type{
+ current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
if (cached) {
// Switch page type to cached if now cached
switch (page_type) {
@@ -534,8 +498,8 @@ struct Memory::Impl {
// space, for example, a system module need not have a VRAM mapping.
break;
case Common::PageType::Memory:
- page_type = Common::PageType::RasterizerCachedMemory;
- current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+ current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ nullptr, Common::PageType::RasterizerCachedMemory);
break;
case Common::PageType::RasterizerCachedMemory:
// There can be more than one GPU region mapped per CPU region, so it's common
@@ -556,16 +520,16 @@ struct Memory::Impl {
// that this area is already unmarked as cached.
break;
case Common::PageType::RasterizerCachedMemory: {
- u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+ u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
if (pointer == nullptr) {
// It's possible that this function has been called while updating the
// pagetable after unmapping a VMA. In that case the underlying VMA will no
// longer exist, and we should just leave the pagetable entry blank.
- page_type = Common::PageType::Unmapped;
+ current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ nullptr, Common::PageType::Unmapped);
} else {
- current_page_table->pointers[vaddr >> PAGE_BITS] =
- pointer - (vaddr & ~PAGE_MASK);
- page_type = Common::PageType::Memory;
+ current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
}
break;
}
@@ -595,7 +559,7 @@ struct Memory::Impl {
auto& gpu = system.GPU();
for (u64 i = 0; i < size; i++) {
const auto page = base + i;
- if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+ if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
}
}
@@ -610,20 +574,18 @@ struct Memory::Impl {
"Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
while (base != end) {
- page_table.attributes[base] = type;
- page_table.pointers[base] = nullptr;
+ page_table.pointers[base].Store(nullptr, type);
page_table.backing_addr[base] = 0;
base += 1;
}
} else {
while (base != end) {
- page_table.pointers[base] =
- system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS);
- page_table.attributes[base] = type;
+ page_table.pointers[base].Store(
+ system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
page_table.backing_addr[base] = target - (base << PAGE_BITS);
- ASSERT_MSG(page_table.pointers[base],
+ ASSERT_MSG(page_table.pointers[base].Pointer(),
"memory mapping base yield a nullptr within the table");
base += 1;
@@ -646,21 +608,13 @@ struct Memory::Impl {
template <typename T>
T Read(const VAddr vaddr) {
// Avoid adding any extra logic to this fast-path block
- if (const u8* const pointer = current_page_table->pointers[vaddr >> PAGE_BITS]) {
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ if (const u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
T value;
std::memcpy(&value, &pointer[vaddr], sizeof(T));
return value;
}
-
- // Otherwise, we need to grab the page with a lock, in case it is currently being modified
- const auto entry = SafePageEntry(vaddr >> PAGE_BITS);
- if (entry.pointer) {
- T value;
- std::memcpy(&value, &entry.pointer[vaddr], sizeof(T));
- return value;
- }
-
- switch (entry.attribute) {
+ switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
return 0;
@@ -692,20 +646,12 @@ struct Memory::Impl {
template <typename T>
void Write(const VAddr vaddr, const T data) {
// Avoid adding any extra logic to this fast-path block
- if (u8* const pointer = current_page_table->pointers[vaddr >> PAGE_BITS]) {
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
std::memcpy(&pointer[vaddr], &data, sizeof(T));
return;
}
-
- // Otherwise, we need to grab the page with a lock, in case it is currently being modified
- const auto entry = SafePageEntry(vaddr >> PAGE_BITS);
- if (entry.pointer) {
- // Memory was mapped, we are done
- std::memcpy(&entry.pointer[vaddr], &data, sizeof(T));
- return;
- }
-
- switch (entry.attribute) {
+ switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
static_cast<u32>(data), vaddr);
@@ -726,15 +672,13 @@ struct Memory::Impl {
template <typename T>
bool WriteExclusive(const VAddr vaddr, const T data, const T expected) {
- u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
- if (page_pointer != nullptr) {
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
// NOTE: Avoid adding any extra logic to this fast-path block
- auto* pointer = reinterpret_cast<volatile T*>(&page_pointer[vaddr]);
- return Common::AtomicCompareAndSwap(pointer, data, expected);
+ const auto volatile_pointer = reinterpret_cast<volatile T*>(&pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
}
-
- const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
- switch (type) {
+ switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
static_cast<u32>(data), vaddr);
@@ -755,15 +699,13 @@ struct Memory::Impl {
}
bool WriteExclusive128(const VAddr vaddr, const u128 data, const u128 expected) {
- u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
- if (page_pointer != nullptr) {
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
// NOTE: Avoid adding any extra logic to this fast-path block
- auto* pointer = reinterpret_cast<volatile u64*>(&page_pointer[vaddr]);
- return Common::AtomicCompareAndSwap(pointer, data, expected);
+ const auto volatile_pointer = reinterpret_cast<volatile u64*>(&pointer[vaddr]);
+ return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
}
-
- const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
- switch (type) {
+ switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}{:016X}", sizeof(data) * 8,
static_cast<u64>(data[1]), static_cast<u64>(data[0]), vaddr);
@@ -783,7 +725,6 @@ struct Memory::Impl {
return true;
}
- mutable std::mutex rasterizer_cache_guard;
Common::PageTable* current_page_table = nullptr;
Core::System& system;
};
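
Removing rasterizer_cache_guard relies on the pointer and type being updated as one unit: a reader that loads the raw entry once can no longer pair a stale pointer with a fresh type, which is why SafePageEntry and the mutex are gone. A sketch of that idea, assuming the raw word is held in a std::atomic; the actual atomicity guarantees of Common::PageTable::PageInfo may differ.

#include <atomic>
#include <cstdint>

// Sketch: a single atomic word holding the packed pointer + type. Fast-path
// readers (Read/Write above) load it once; RasterizerMarkRegionCached stores a
// new packed value, and no lock is needed for the pair to stay consistent.
struct AtomicPageEntry {
    std::atomic<std::uintptr_t> raw{0};

    std::uintptr_t Load() const {
        return raw.load(std::memory_order_relaxed);
    }
    void StoreRaw(std::uintptr_t packed) {
        raw.store(packed, std::memory_order_relaxed);
    }
};
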