From 9bede4eeed523f9707a989f1297279c006086e76 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 18 Jul 2019 18:15:53 -0400
Subject: VM_Manager: Align allocated memory to 256bytes

This commit ensures that all backing memory allocated for the Guest CPU is
aligned to 256 bytes. This is due to how GPU memory works and the heavy
constraints it places on the alignment of physical memory.
---
 src/core/hle/kernel/vm_manager.cpp | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'src/core/hle/kernel/vm_manager.cpp')

diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 4f45fb03b..40cea1e7c 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -5,6 +5,7 @@
 #include <algorithm>
 #include <iterator>
 #include <utility>
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/memory_hook.h"
@@ -103,7 +104,7 @@ bool VMManager::IsValidHandle(VMAHandle handle) const {
 }
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
-                                                          std::shared_ptr<std::vector<u8>> block,
+                                                          std::shared_ptr<PhysicalMemory> block,
                                                           std::size_t offset, u64 size,
                                                           MemoryState state, VMAPermission perm) {
     ASSERT(block != nullptr);
@@ -260,7 +261,7 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
 
     if (heap_memory == nullptr) {
         // Initialize heap
-        heap_memory = std::make_shared<std::vector<u8>>(size);
+        heap_memory = std::make_shared<PhysicalMemory>(size);
         heap_end = heap_region_base + size;
     } else {
         UnmapRange(heap_region_base, GetCurrentHeapSize());
@@ -341,7 +342,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
         const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
         if (vma.state == MemoryState::Unmapped) {
             const auto map_res =
-                MapMemoryBlock(cur_addr, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0,
                                map_size, MemoryState::Heap, VMAPermission::ReadWrite);
             result = map_res.Code();
             if (result.IsError()) {
@@ -442,7 +443,7 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [map_address, map_size] : unmapped_regions) {
             const auto remap_res =
-                MapMemoryBlock(map_address, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0,
                                map_size, MemoryState::Heap, VMAPermission::None);
             ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
         }
@@ -593,7 +594,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
     ASSERT_MSG(vma_offset + size <= vma->second.size,
                "Shared memory exceeds bounds of mapped block");
 
-    const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block;
+    const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block;
     const std::size_t backing_block_offset = vma->second.offset + vma_offset;
 
     CASCADE_RESULT(auto new_vma,
@@ -606,7 +607,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
     return RESULT_SUCCESS;
 }
 
-void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
+void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) {
     // If this ever proves to have a noticeable performance impact, allow users of the function to
     // specify a specific range of addresses to limit the scan to.
     for (const auto& p : vma_map) {
@@ -764,7 +765,7 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
                                     right.backing_block->begin() + right.offset + right.size);
     } else {
         // Slow case: make a new memory block for left and right.
-        auto new_memory = std::make_shared<std::vector<u8>>();
+        auto new_memory = std::make_shared<PhysicalMemory>();
         new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
                            left.backing_block->begin() + left.offset + left.size);
         new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
-- 
cgit v1.2.3
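
Note: this patch is limited to vm_manager.cpp, so the definition of PhysicalMemory itself does not appear above. Below is a minimal, hypothetical sketch of how a 256-byte-aligned backing store could be expressed with a custom allocator; the names Common::AlignmentAllocator and the exact alias chosen for Kernel::PhysicalMemory are assumptions made for illustration and are not taken from this diff.

    // Hypothetical sketch, not part of the patch above. It shows one way to get
    // a std::vector whose data() pointer is always 256-byte aligned. The real
    // definitions used by yuzu live outside vm_manager.cpp and may differ.

    #include <cstddef>
    #include <cstdint>
    #include <new>
    #include <vector>

    namespace Common {

    // Minimal C++17 allocator that over-aligns every allocation to Align bytes.
    template <typename T, std::size_t Align>
    class AlignmentAllocator {
    public:
        using value_type = T;

        // Needed so std::vector can rebind the allocator to its internal types.
        template <typename U>
        struct rebind {
            using other = AlignmentAllocator<U, Align>;
        };

        AlignmentAllocator() noexcept = default;
        template <typename U>
        AlignmentAllocator(const AlignmentAllocator<U, Align>&) noexcept {}

        T* allocate(std::size_t n) {
            // Aligned operator new (C++17) guarantees the requested alignment.
            return static_cast<T*>(::operator new(n * sizeof(T), std::align_val_t{Align}));
        }

        void deallocate(T* p, std::size_t) noexcept {
            ::operator delete(p, std::align_val_t{Align});
        }
    };

    template <typename T, typename U, std::size_t Align>
    bool operator==(const AlignmentAllocator<T, Align>&,
                    const AlignmentAllocator<U, Align>&) noexcept {
        return true;
    }

    template <typename T, typename U, std::size_t Align>
    bool operator!=(const AlignmentAllocator<T, Align>&,
                    const AlignmentAllocator<U, Align>&) noexcept {
        return false;
    }

    } // namespace Common

    namespace Kernel {

    // Backing store for guest memory whose buffer is always 256-byte aligned,
    // matching the GPU physical-memory alignment constraint described in the
    // commit message. (Assumed definition; the patch only shows its use sites.)
    using PhysicalMemory =
        std::vector<std::uint8_t, Common::AlignmentAllocator<std::uint8_t, 256>>;

    } // namespace Kernel

With an alias along these lines, the std::make_shared<PhysicalMemory>(map_size, 0) calls in the diff would produce zero-filled guest backing blocks whose data pointers are multiples of 256, which is the physical-memory alignment constraint the commit message cites for GPU mappings.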