From cdeeecf0807d0005356f30db0f7164c5891a9245 Mon Sep 17 00:00:00 2001
From: Yuri Kunde Schlesner
Date: Fri, 17 Jul 2015 23:19:16 -0300
Subject: Kernel: Properly implement ControlMemory FREE and COMMIT

---
 src/core/hle/kernel/process.cpp    | 120 +++++++++++++++++++++++++++++++++++--
 src/core/hle/kernel/process.h      |  31 ++++++++--
 src/core/hle/kernel/vm_manager.cpp |  91 ++++++++++++++++++++++++----
 src/core/hle/kernel/vm_manager.h   |  24 +++++++-
 4 files changed, 243 insertions(+), 23 deletions(-)

diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index ad953cdbf..1db763999 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -36,8 +36,7 @@ SharedPtr<Process> Process::Create(SharedPtr<CodeSet> code_set) {
     process->codeset = std::move(code_set);
     process->flags.raw = 0;
     process->flags.memory_region = MemoryRegion::APPLICATION;
-    process->address_space = Common::make_unique<VMManager>();
-    Memory::InitLegacyAddressSpace(*process->address_space);
+    Memory::InitLegacyAddressSpace(process->vm_manager);

     return process;
 }
@@ -104,19 +103,130 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {

 void Process::Run(s32 main_thread_priority, u32 stack_size) {
     auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
             MemoryState memory_state) {
-        auto vma = address_space->MapMemoryBlock(segment.addr, codeset->memory,
+        auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
                 segment.offset, segment.size, memory_state).Unwrap();
-        address_space->Reprotect(vma, permissions);
+        vm_manager.Reprotect(vma, permissions);
     };

+    // Map CodeSet segments
     MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
     MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
     MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);

-    address_space->LogLayout(Log::Level::Debug);
+    // Allocate and map stack
+    vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
+            std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked
+            ).Unwrap();
+
+    vm_manager.LogLayout(Log::Level::Debug);
     Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority);
 }

+ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (heap_memory == nullptr) {
+        // Initialize heap
+        heap_memory = std::make_shared<std::vector<u8>>();
+        heap_start = heap_end = target;
+    }
+
+    // If necessary, expand backing vector to cover new heap extents.
+    if (target < heap_start) {
+        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
+        heap_start = target;
+        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    if (target + size > heap_end) {
+        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
+        heap_end = target + size;
+        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
+    }
+    ASSERT(heap_end - heap_start == heap_memory->size());
+
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private));
+    vm_manager.Reprotect(vma, perms);
+
+    return MakeResult<VAddr>(heap_end - size);
+}
+
+ResultCode Process::HeapFree(VAddr target, u32 size) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+        return ERR_INVALID_ADDRESS;
+    }
+
+    ResultCode result = vm_manager.UnmapRange(target, size);
+    if (result.IsError()) return result;
+
+    return RESULT_SUCCESS;
+}
+
+ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
+    if (linear_heap_memory == nullptr) {
+        // Initialize heap
+        linear_heap_memory = std::make_shared<std::vector<u8>>();
+    }
+
+    VAddr heap_end = Memory::LINEAR_HEAP_VADDR + (u32)linear_heap_memory->size();
+    // Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address),
+    // but explicit addresses are also accepted and respected.
+    if (target == 0) {
+        target = heap_end;
+    }
+
+    if (target < Memory::LINEAR_HEAP_VADDR || target + size > Memory::LINEAR_HEAP_VADDR_END ||
+        target > heap_end || target + size < target) {
+
+        return ERR_INVALID_ADDRESS;
+    }
+
+    // Expansion of the linear heap is only allowed if you do an allocation immediately at its
+    // end. It's possible to free gaps in the middle of the heap and then reallocate them later,
+    // but expansions are only allowed at the end.
+    if (target == heap_end) {
+        linear_heap_memory->insert(linear_heap_memory->end(), size, 0);
+        vm_manager.RefreshMemoryBlockMappings(linear_heap_memory.get());
+    }
+
+    size_t offset = target - Memory::LINEAR_HEAP_VADDR;
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linear_heap_memory, offset, size, MemoryState::Continuous));
+    vm_manager.Reprotect(vma, perms);
+
+    return MakeResult<VAddr>(target);
+}
+
+ResultCode Process::LinearFree(VAddr target, u32 size) {
+    if (linear_heap_memory == nullptr || target < Memory::LINEAR_HEAP_VADDR ||
+        target + size > Memory::LINEAR_HEAP_VADDR_END || target + size < target) {

+        return ERR_INVALID_ADDRESS;
+    }
+
+    VAddr heap_end = Memory::LINEAR_HEAP_VADDR + (u32)linear_heap_memory->size();
+    if (target + size > heap_end) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    ResultCode result = vm_manager.UnmapRange(target, size);
+    if (result.IsError()) return result;
+
+    if (target + size == heap_end) {
+        // End of linear heap has been freed, so check what's the last allocated block in it and
+        // reduce the size.
+        auto vma = vm_manager.FindVMA(target);
+        ASSERT(vma != vm_manager.vma_map.end());
+        ASSERT(vma->second.type == VMAType::Free);
+        VAddr new_end = vma->second.base;
+        if (new_end >= Memory::LINEAR_HEAP_VADDR) {
+            linear_heap_memory->resize(new_end - Memory::LINEAR_HEAP_VADDR);
+        }
+    }
+
+    return RESULT_SUCCESS;
+}
+
 Kernel::Process::Process() {}
 Kernel::Process::~Process() {}

diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 83d3aceae..567d5df18 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -15,6 +15,7 @@
 #include "common/common_types.h"

 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/vm_manager.h"

 namespace Kernel {

@@ -48,7 +49,6 @@ union ProcessFlags {
 };

 class ResourceLimit;
-class VMManager;

 struct CodeSet final : public Object {
     static SharedPtr<CodeSet> Create(std::string name, u64 program_id);
@@ -108,10 +108,6 @@ public:
     /// The id of this process
     u32 process_id = next_process_id++;

-    /// Bitmask of the used TLS slots
-    std::bitset<300> used_tls_slots;
-    std::unique_ptr<VMManager> address_space;
-
     /**
      * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
      * to this process.
@@ -123,6 +119,31 @@ public:
      */
     void Run(s32 main_thread_priority, u32 stack_size);

+
+    ///////////////////////////////////////////////////////////////////////////////////////////////
+    // Memory Management
+
+    VMManager vm_manager;
+
+    // Memory used to back the allocations in the regular heap. A single vector is used to cover
+    // the entire virtual address space extents that bound the allocations, including any holes.
+    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
+    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
+    std::shared_ptr<std::vector<u8>> heap_memory;
+    // The left/right bounds of the address space covered by heap_memory.
+    VAddr heap_start = 0, heap_end = 0;
+
+    std::shared_ptr<std::vector<u8>> linear_heap_memory;
+
+    /// Bitmask of the used TLS slots
+    std::bitset<300> used_tls_slots;
+
+    ResultVal<VAddr> HeapAllocate(VAddr target, u32 size, VMAPermission perms);
+    ResultCode HeapFree(VAddr target, u32 size);
+
+    ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms);
+    ResultCode LinearFree(VAddr target, u32 size);
+
 private:
     Process();
     ~Process() override;

diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 65395476b..2610acf76 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -60,7 +60,11 @@ void VMManager::Reset() {
 }

 VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
-    return std::prev(vma_map.upper_bound(target));
+    if (target >= MAX_ADDRESS) {
+        return vma_map.end();
+    } else {
+        return std::prev(vma_map.upper_bound(target));
+    }
 }

 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
@@ -115,10 +119,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u3
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }

-void VMManager::Unmap(VMAHandle vma_handle) {
-    VMAIter iter = StripIterConstness(vma_handle);
-
-    VirtualMemoryArea& vma = iter->second;
+VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
+    VirtualMemoryArea& vma = vma_handle->second;
     vma.type = VMAType::Free;
     vma.permissions = VMAPermission::None;
     vma.meminfo_state = MemoryState::Free;
@@ -130,17 +132,57 @@

     UpdatePageTableForVMA(vma);

-    MergeAdjacent(iter);
+    return MergeAdjacent(vma_handle);
+}
+
+ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
+    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
+    VAddr target_end = target + size;
+
+    VMAIter end = vma_map.end();
+    // The comparison against the end of the range must be done using addresses since VMAs can be
+    // merged during this process, causing invalidation of the iterators.
+    while (vma != end && vma->second.base < target_end) {
+        vma = std::next(Unmap(vma));
+    }
+
+    ASSERT(FindVMA(target)->second.size >= size);
+    return RESULT_SUCCESS;
 }

-void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
+VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
     VMAIter iter = StripIterConstness(vma_handle);
     VirtualMemoryArea& vma = iter->second;
     vma.permissions = new_perms;
     UpdatePageTableForVMA(vma);

-    MergeAdjacent(iter);
+    return MergeAdjacent(iter);
+}
+
+ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
+    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
+    VAddr target_end = target + size;
+
+    VMAIter end = vma_map.end();
+    // The comparison against the end of the range must be done using addresses since VMAs can be
+    // merged during this process, causing invalidation of the iterators.
+    while (vma != end && vma->second.base < target_end) {
+        vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
+    }
+
+    return RESULT_SUCCESS;
+}
+
+void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
+    // If this ever proves to have a noticeable performance impact, allow users of the function to
+    // specify a specific range of addresses to limit the scan to.
+    for (const auto& p : vma_map) {
+        const VirtualMemoryArea& vma = p.second;
+        if (block == vma.backing_block.get()) {
+            UpdatePageTableForVMA(vma);
+        }
+    }
 }

 void VMManager::LogLayout(Log::Level log_level) const {
@@ -161,8 +203,8 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
 }

 ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %8X", size);
-    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
+    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", base);

     VMAIter vma_handle = StripIterConstness(FindVMA(base));
     if (vma_handle == vma_map.end()) {
@@ -196,6 +238,35 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
     return MakeResult<VMAIter>(vma_handle);
 }

+ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
+    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
+    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", target);
+
+    VAddr target_end = target + size;
+    ASSERT(target_end >= target);
+    ASSERT(target_end <= MAX_ADDRESS);
+    ASSERT(size > 0);
+
+    VMAIter begin_vma = StripIterConstness(FindVMA(target));
+    VMAIter i_end = vma_map.lower_bound(target_end);
+    for (auto i = begin_vma; i != i_end; ++i) {
+        if (i->second.type == VMAType::Free) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+    }
+
+    if (target != begin_vma->second.base) {
+        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
+    }
+
+    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
+    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
+        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
+    }
+
+    return MakeResult<VMAIter>(begin_vma);
+}
+
 VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
     VirtualMemoryArea& old_vma = vma_handle->second;
     VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA

diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 15c10e413..4e95f1f0c 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -171,11 +171,20 @@ public:
      */
     ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);

-    /// Unmaps the given VMA.
-    void Unmap(VMAHandle vma);
+    /// Unmaps a range of addresses, splitting VMAs as necessary.
+    ResultCode UnmapRange(VAddr target, u32 size);

     /// Changes the permissions of the given VMA.
-    void Reprotect(VMAHandle vma, VMAPermission new_perms);
+    VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);
+
+    /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
+    ResultCode ReprotectRange(VAddr target, u32 size, VMAPermission new_perms);
+
+    /**
+     * Scans all VMAs and updates the page table range of any that use the given vector as backing
+     * memory. This should be called after any operation that causes reallocation of the vector.
+     */
+    void RefreshMemoryBlockMappings(const std::vector<u8>* block);

     /// Dumps the address space layout to the log, for debugging
     void LogLayout(Log::Level log_level) const;
@@ -186,12 +195,21 @@ private:
     /// Converts a VMAHandle to a mutable VMAIter.
     VMAIter StripIterConstness(const VMAHandle& iter);

+    /// Unmaps the given VMA.
+    VMAIter Unmap(VMAIter vma);
+
     /**
      * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
      * the appropriate error checking.
      */
     ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);

+    /**
+     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
+     * end of the range.
+     */
+    ResultVal<VMAIter> CarveVMARange(VAddr base, u32 size);
+
     /**
      * Splits a VMA in two, at the specified offset.
      * @returns the right side of the split, with the original iterator becoming the left side.
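
The Process methods added above are the kernel-side building blocks that the ControlMemory SVC's COMMIT and FREE operations dispatch to. As a rough illustration only (this dispatcher is not part of the commit: the MEMOP_* values follow the 3DS kernel operation codes documented on 3dbrew, and the handler's shape and the ERR_INVALID_COMBINATION constant are assumptions rather than Citra's actual svc.cpp code), a handler might route requests like this:

// Sketch of a ControlMemory dispatcher built on the new Process methods.
// MEMOP_* values follow 3dbrew's documentation of the 3DS kernel; the
// function itself is illustrative and not part of this commit.
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"

enum MemoryOperation : u32 {
    MEMOP_FREE   = 1,
    MEMOP_COMMIT = 3,
    MEMOP_LINEAR = 0x10000, // flag OR'd into the operation code for the linear heap
};

ResultCode ControlMemorySketch(Kernel::Process& process, u32* out_addr, u32 operation,
                               u32 addr0, u32 size, u32 permissions) {
    auto perms = static_cast<Kernel::VMAPermission>(permissions);
    switch (operation) {
    case MEMOP_COMMIT:
        // Commit pages in the regular heap; the mapped address is returned to the caller.
        CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, perms));
        return RESULT_SUCCESS;
    case MEMOP_COMMIT | MEMOP_LINEAR:
        // addr0 == 0 asks the kernel to allocate at the current end of the linear heap.
        CASCADE_RESULT(*out_addr, process.LinearAllocate(addr0, size, perms));
        return RESULT_SUCCESS;
    case MEMOP_FREE:
        return process.HeapFree(addr0, size);
    case MEMOP_FREE | MEMOP_LINEAR:
        return process.LinearFree(addr0, size);
    default:
        return ERR_INVALID_COMBINATION; // hypothetical constant for unhandled operations
    }
}

Note that LinearAllocate only grows its backing vector when the requested address is exactly the current end of the linear heap, so committing linear memory into a gap succeeds only if that range was previously committed and then freed, matching the behavior described in the patch's comments.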