Diffstat:
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp | 183
1 file changed, 183 insertions(+), 0 deletions(-)
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
new file mode 100644
index 000000000..58fe4a133
--- /dev/null
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -0,0 +1,183 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/alignment.h"
+#include "core/hle/kernel/k_memory_layout.h"
+#include "core/hle/kernel/k_system_control.h"
+
+namespace Kernel {
+
+namespace {
+
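+// Fixed-capacity pool for KMemoryRegion nodes: entries are placement-new
+// constructed into a static array, so building the memory layout requires no
+// dynamic allocation.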
+class KMemoryRegionAllocator final : NonCopyable {
+public:
+ static constexpr size_t MaxMemoryRegions = 200;
+
+private:
+ KMemoryRegion region_heap[MaxMemoryRegions]{};
+ size_t num_regions{};
+
+public:
+ constexpr KMemoryRegionAllocator() = default;
+
+public:
+ template <typename... Args>
+ KMemoryRegion* Allocate(Args&&... args) {
+ // Ensure we stay within the bounds of our heap.
+ ASSERT(this->num_regions < MaxMemoryRegions);
+
+ // Create the new region.
+ KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
+ new (region) KMemoryRegion(std::forward<Args>(args)...);
+
+ return region;
+ }
+};
+
+KMemoryRegionAllocator g_memory_region_allocator;
+
+template <typename... Args>
+KMemoryRegion* AllocateRegion(Args&&... args) {
+ return g_memory_region_allocator.Allocate(std::forward<Args>(args)...);
+}
+
+} // namespace
+
+void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
+ this->insert(*AllocateRegion(address, last_address, attr, type_id));
+}
+
+bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
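+    // For illustration: inserting address = 0x4000, size = 0x4000 into a region
+    // spanning [0x0000, 0xFFFF] splits it into up to three regions:
+    // [0x0000, 0x3FFF] (old type/attr), [0x4000, 0x7FFF] (the new region), and
+    // [0x8000, 0xFFFF] (old type/attr).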
+ // Locate the memory region that contains the address.
+ KMemoryRegion* found = this->FindModifiable(address);
+
+ // We require that the old attr is correct.
+ if (found->GetAttributes() != old_attr) {
+ return false;
+ }
+
+    // We further require that the new region fits entirely inside the old one,
+    // so that the old region can be split around it.
+ const u64 inserted_region_end = address + size;
+ const u64 inserted_region_last = inserted_region_end - 1;
+ if (found->GetLastAddress() < inserted_region_last) {
+ return false;
+ }
+
+    // Further, we require that the new type id be derivable from the old region's type.
+ if (!found->CanDerive(type_id)) {
+ return false;
+ }
+
+ // Cache information from the region before we remove it.
+ const u64 old_address = found->GetAddress();
+ const u64 old_last = found->GetLastAddress();
+ const u64 old_pair = found->GetPairAddress();
+ const u32 old_type = found->GetType();
+
+ // Erase the existing region from the tree.
+ this->erase(this->iterator_to(*found));
+
+ // Insert the new region into the tree.
+ if (old_address == address) {
+ // Reuse the old object for the new region, if we can.
+ found->Reset(address, inserted_region_last, old_pair, new_attr, type_id);
+ this->insert(*found);
+ } else {
+        // If we can't reuse it, truncate the old region to end just before the inserted one.
+ found->Reset(old_address, address - 1, old_pair, old_attr, old_type);
+ this->insert(*found);
+
+ // Insert a new region for the split.
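+            // Shift a valid pair address by the same offset as the split; the
+            // all-ones value is treated as a "no pair" sentinel and preserved.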
+ const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
+ ? old_pair + (address - old_address)
+ : old_pair;
+ this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id));
+ }
+
+    // If a remainder of the old region extends past the inserted region, insert it as well.
+ if (old_last != inserted_region_last) {
+ const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
+ ? old_pair + (inserted_region_end - old_address)
+ : old_pair;
+ this->insert(
+ *AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type));
+ }
+
+ return true;
+}
+
+VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
+ // We want to find the total extents of the type id.
+ const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
+
+    // Ensure that the base of the extents is aligned as requested.
+ ASSERT(Common::IsAligned(extents.GetAddress(), alignment));
+
+ const u64 first_address = extents.GetAddress();
+ const u64 last_address = extents.GetLastAddress();
+
+ const u64 first_index = first_address / alignment;
+ const u64 last_index = last_address / alignment;
+
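+    // Rejection sampling: draw aligned candidates at random until one fits
+    // entirely within a single region of the requested type.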
+ while (true) {
+ const u64 candidate =
+ KSystemControl::GenerateRandomRange(first_index, last_index) * alignment;
+
+ // Ensure that the candidate doesn't overflow with the size.
+ if (!(candidate < candidate + size)) {
+ continue;
+ }
+
+ const u64 candidate_last = candidate + size - 1;
+
+ // Ensure that the candidate fits within the region.
+ if (candidate_last > last_address) {
+ continue;
+ }
+
+ // Locate the candidate region, and ensure it fits and has the correct type id.
+ if (const auto& candidate_region = *this->Find(candidate);
+ !(candidate_last <= candidate_region.GetLastAddress() &&
+ candidate_region.GetType() == type_id)) {
+ continue;
+ }
+
+ return candidate;
+ }
+}
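+
+// For example (illustrative values, not an actual call site), a caller could
+// pick a random 2 MiB-aligned 64 MiB window out of DRAM with:
+//   GetRandomAlignedRegion(0x4000000, 0x200000, KMemoryRegionType_Dram);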
+
+void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
+ VAddr linear_virtual_start) {
+ // Set static differences.
+ linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
+ linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
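+    // Both offsets wrap with unsigned arithmetic, so a linear-mapped address
+    // translates in either direction with a single addition:
+    //   virt = phys + linear_phys_to_virt_diff
+    //   phys = virt + linear_virt_to_phys_diff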
+
+ // Initialize linear trees.
+ for (auto& region : GetPhysicalMemoryRegionTree()) {
+ if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
+ GetPhysicalLinearMemoryRegionTree().InsertDirectly(
+ region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
+ region.GetType());
+ }
+ }
+
+ for (auto& region : GetVirtualMemoryRegionTree()) {
+ if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
+ GetVirtualLinearMemoryRegionTree().InsertDirectly(
+ region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
+ region.GetType());
+ }
+ }
+}
+
+size_t KMemoryLayout::GetResourceRegionSizeForInit() {
+ // Calculate resource region size based on whether we allow extra threads.
+ const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
+ size_t resource_region_size =
+ KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
+
+ return resource_region_size;
+}
+
+} // namespace Kernel