author | bunnei <bunneidev@gmail.com> | 2022-09-06 02:51:50 +0200
committer | bunnei <bunneidev@gmail.com> | 2022-10-19 04:13:34 +0200
commit | 345b9e6a08f7ce99bb71f7184157ed0fe22bf27d (patch)
tree | 0fdc0689d1c8c78753cfc14376ee37bc10cc9b0c /src/core/hle/kernel
parent | core: hle: kernel: k_process: Change Status -> State. (diff)
Diffstat
-rw-r--r-- | src/core/hle/kernel/k_dynamic_page_manager.h | 136
1 file changed, 136 insertions, 0 deletions
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
new file mode 100644
index 000000000..88d53776a
--- /dev/null
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -0,0 +1,136 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/k_page_bitmap.h"
+#include "core/hle/kernel/k_spin_lock.h"
+#include "core/hle/kernel/memory_types.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+class KDynamicPageManager {
+public:
+    class PageBuffer {
+    private:
+        u8 m_buffer[PageSize];
+    };
+    static_assert(sizeof(PageBuffer) == PageSize);
+
+public:
+    KDynamicPageManager() = default;
+
+    template <typename T>
+    T* GetPointer(VAddr addr) {
+        return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    template <typename T>
+    const T* GetPointer(VAddr addr) const {
+        return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
+    }
+
+    Result Initialize(VAddr addr, size_t sz) {
+        // We need to have positive size.
+        R_UNLESS(sz > 0, ResultOutOfMemory);
+        m_backing_memory.resize(sz);
+
+        // Calculate management overhead.
+        const size_t management_size =
+            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
+        const size_t allocatable_size = sz - management_size;
+
+        // Set tracking fields.
+        m_address = addr;
+        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
+        m_count = allocatable_size / sizeof(PageBuffer);
+        R_UNLESS(m_count > 0, ResultOutOfMemory);
+
+        // Clear the management region.
+        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
+        std::memset(management_ptr, 0, management_size);
+
+        // Initialize the bitmap.
+        m_page_bitmap.Initialize(management_ptr, m_count);
+
+        // Free the pages to the bitmap.
+        for (size_t i = 0; i < m_count; i++) {
+            // Ensure the freed page is all-zero.
+            std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
+
+            // Set the bit for the free page.
+            m_page_bitmap.SetBit(i);
+        }
+
+        return ResultSuccess;
+    }
+
+    VAddr GetAddress() const {
+        return m_address;
+    }
+    size_t GetSize() const {
+        return m_size;
+    }
+    size_t GetUsed() const {
+        return m_used;
+    }
+    size_t GetPeak() const {
+        return m_peak;
+    }
+    size_t GetCount() const {
+        return m_count;
+    }
+
+    PageBuffer* Allocate() {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeBlock(true);
+        if (soffset < 0) [[unlikely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearBit(offset);
+        m_peak = std::max(m_peak, (++m_used));
+
+        return GetPointer<PageBuffer>(m_address) + offset;
+    }
+
+    void Free(PageBuffer* pb) {
+        // Ensure all pages in the heap are zero.
+        std::memset(pb, 0, PageSize);
+
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Set the bit for the free page.
+        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
+        m_page_bitmap.SetBit(offset);
+
+        // Decrement our used count.
+        --m_used;
+    }
+
+private:
+    KSpinLock m_lock;
+    KPageBitmap m_page_bitmap;
+    size_t m_used{};
+    size_t m_peak{};
+    size_t m_count{};
+    VAddr m_address{};
+    size_t m_size{};
+
+    // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
+    std::vector<u8> m_backing_memory;
+};
+
+} // namespace Kernel
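For context, here is a minimal usage sketch of the new allocator's public API; it is not part of the commit. It assumes yuzu's internal kernel types (`VAddr`, `Result`, `ResultSuccess`, `ResultOutOfMemory`, the `R_UNLESS` macro) are available via the headers below, and `ExampleUse` with its address/size parameters is a hypothetical caller invented for illustration.

```cpp
// Illustrative only: drive KDynamicPageManager through Initialize/Allocate/Free.
// ExampleUse, region_start, and region_size are hypothetical names.
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/svc_results.h"

namespace Kernel {

Result ExampleUse(VAddr region_start, size_t region_size) {
    KDynamicPageManager page_manager;

    // Carve the region into PageSize buffers; the tail of the region holds
    // the KPageBitmap management words.
    const Result init_result = page_manager.Initialize(region_start, region_size);
    R_UNLESS(init_result.IsSuccess(), init_result);

    // Grab a zero-filled page; Allocate() returns nullptr once the bitmap is exhausted.
    KDynamicPageManager::PageBuffer* page = page_manager.Allocate();
    R_UNLESS(page != nullptr, ResultOutOfMemory);

    // ... use the page as backing storage for kernel objects ...

    // Return the page; Free() re-zeroes it and sets its bit back in the bitmap.
    page_manager.Free(page);

    return ResultSuccess;
}

} // namespace Kernel
```

Note the invariant the sketch relies on: every page handed out by `Allocate()` is all-zero, because `Initialize()` zeroes each page before marking it free and `Free()` re-zeroes a page before returning it to the bitmap.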