Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt    |   2 +
-rw-r--r--  src/common/address_space.cpp |  11 +
-rw-r--r--  src/common/address_space.h   | 134 +
-rw-r--r--  src/common/address_space.inc | 338 +
4 files changed, 485 insertions(+), 0 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 2db414819..a02696873 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -17,6 +17,8 @@ endif ()
include(GenerateSCMRev)
add_library(common STATIC
+ address_space.cpp
+ address_space.h
algorithm.h
alignment.h
announce_multiplayer_room.h
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp
new file mode 100644
index 000000000..6db85be87
--- /dev/null
+++ b/src/common/address_space.cpp
@@ -0,0 +1,11 @@
+// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#include "common/address_space.inc"
+
+namespace Common {
+
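+// Explicitly instantiate the 32-bit allocator here so its members are compiled into this
+// translation unit (the definitions live in address_space.inc)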
+template class Common::FlatAllocator<u32, 0, 32>;
+
+}
diff --git a/src/common/address_space.h b/src/common/address_space.h
new file mode 100644
index 000000000..fd2f32b7d
--- /dev/null
+++ b/src/common/address_space.h
@@ -0,0 +1,134 @@
+// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <concepts>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+template <typename VaType, size_t AddressSpaceBits>
+concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
+
+struct EmptyStruct {};
+
+/**
+ * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
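+ * @note Each block marks the start of a run at its VA; the run extends until the next block's VA,
+ * so lookups reduce to a binary search over block start addresses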
+ */
+template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
+requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAddressSpaceMap {
+private:
+    std::function<void(VaType, VaType)>
+        unmapCallback{}; //!< Callback called when the mappings in a region have changed
+
+protected:
+ /**
+ * @brief Represents a block of memory in the AS, the physical mapping is contiguous until
+ * another block with a different phys address is hit
+ */
+ struct Block {
+ VaType virt{UnmappedVa}; //!< VA of the block
+ PaType phys{UnmappedPa}; //!< PA of the block, will increase 1-1 with VA until a new block
+ //!< is encountered
+ [[no_unique_address]] ExtraBlockInfo extraInfo;
+
+ Block() = default;
+
+ Block(VaType virt, PaType phys, ExtraBlockInfo extraInfo)
+ : virt(virt), phys(phys), extraInfo(extraInfo) {}
+
+ constexpr bool Valid() {
+ return virt != UnmappedVa;
+ }
+
+ constexpr bool Mapped() {
+ return phys != UnmappedPa;
+ }
+
+ constexpr bool Unmapped() {
+ return phys == UnmappedPa;
+ }
+
+ bool operator<(const VaType& pVirt) const {
+ return virt < pVirt;
+ }
+ };
+
+ std::mutex blockMutex;
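+    // The block vector is kept sorted by VA and is always terminated by an unmapped block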
+ std::vector<Block> blocks{Block{}};
+
+ /**
+ * @brief Maps a PA range into the given AS region
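+     * @note e.g. mapping PA 0x4000 at VA 0x1000 with size 0x2000 into an empty AS yields the
+     * blocks (0x1000 -> 0x4000) and (0x3000 -> UnmappedPa), the latter terminating the mapped run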
+ * @note blockMutex MUST be locked when calling this
+ */
+ void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo);
+
+ /**
+ * @brief Unmaps the given range and merges it with other unmapped regions
+ * @note blockMutex MUST be locked when calling this
+ */
+ void UnmapLocked(VaType virt, VaType size);
+
+public:
+ static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) +
+ ((1ULL << (AddressSpaceBits - 1)) -
+ 1)}; //!< The maximum VA that this AS can technically reach
+
+ VaType vaLimit{VaMaximum}; //!< A soft limit on the maximum VA of the AS
+
+ FlatAddressSpaceMap(VaType vaLimit, std::function<void(VaType, VaType)> unmapCallback = {});
+
+ FlatAddressSpaceMap() = default;
+
+ void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo = {}) {
+ std::scoped_lock lock(blockMutex);
+ MapLocked(virt, phys, size, extraInfo);
+ }
+
+ void Unmap(VaType virt, VaType size) {
+ std::scoped_lock lock(blockMutex);
+ UnmapLocked(virt, size);
+ }
+};
+
+/**
+ * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an
+ * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
+ */
+template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
+requires AddressSpaceValid<VaType, AddressSpaceBits> class FlatAllocator
+ : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
+private:
+ using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
+
+ VaType currentLinearAllocEnd; //!< The end address for the initial linear allocation pass, once
+ //!< this reaches the AS limit the slower allocation path will be
+ //!< used
+
+public:
+ VaType vaStart; //!< The base VA of the allocator, no allocations will be below this
+
+ FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum);
+
+ /**
+ * @brief Allocates a region in the AS of the given size and returns its address
+ */
+ VaType Allocate(VaType size);
+
+ /**
+ * @brief Marks the given region in the AS as allocated
+ */
+ void AllocateFixed(VaType virt, VaType size);
+
+ /**
+ * @brief Frees an AS region so it can be used again
+ */
+ void Free(VaType virt, VaType size);
+};
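+
+/**
+ * A usage sketch with illustrative values, assuming a 32-bit AS based at 0x1000:
+ * @code
+ * Common::FlatAllocator<u32, 0, 32> allocator{0x1000};
+ * const u32 va{allocator.Allocate(0x2000)};    // Returns UnmappedVa (0) if the AS is full
+ * allocator.AllocateFixed(0x80000000, 0x1000); // Mark a fixed region as allocated
+ * allocator.Free(va, 0x2000);                  // Return the region for reuse
+ * @endcode
+ */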
+} // namespace Common
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..907c55d88
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+
+#include "common/address_space.h"
+#include "common/assert.h"
+
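+// These macros avoid repeating the full template head on every out-of-line member definition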
+#define MAP_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+#define MAP_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+
+#define ALLOC_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+#define ALLOC_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+
+namespace Common {
+MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit,
+ std::function<void(VaType, VaType)> unmapCallback)
+ : unmapCallback(std::move(unmapCallback)), vaLimit(vaLimit) {
+ if (vaLimit > VaMaximum)
+ UNREACHABLE_MSG("Invalid VA limit!");
+}
+
+MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) {
+ VaType virtEnd{virt + size};
+
+ if (virtEnd > vaLimit)
+ UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}",
+ virtEnd, vaLimit);
+
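+    // Find the first block that starts at or after virtEnd; the block before it is the last one
+    // starting below virtEnd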
+ auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
+ if (blockEndSuccessor == blocks.begin())
+ UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd);
+
+ auto blockEndPredecessor{std::prev(blockEndSuccessor)};
+
+ if (blockEndSuccessor != blocks.end()) {
+ // We have blocks in front of us, if one is directly in front then we don't have to add a
+ // tail
+ if (blockEndSuccessor->virt != virtEnd) {
+            PaType tailPhys{[&]() -> PaType {
+                if constexpr (!PaContigSplit) {
+                    // No offset can be applied to a non-contiguous PA, so propagate it as-is
+                    return blockEndPredecessor->phys;
+                } else {
+                    if (blockEndPredecessor->Unmapped())
+                        // Always propagate unmapped regions rather than calculating an offset
+                        return blockEndPredecessor->phys;
+                    else
+                        return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
+                }
+            }()};
+
+ if (blockEndPredecessor->virt >= virt) {
+ // If this block's start would be overlapped by the map then reuse it as a tail
+ // block
+ blockEndPredecessor->virt = virtEnd;
+ blockEndPredecessor->phys = tailPhys;
+                // The reused tail block retains its original extraInfo
+
+                // The reused tail block is now our successor rather than our predecessor
+ blockEndSuccessor = blockEndPredecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(blockEndSuccessor,
+ {Block(virt, phys, extraInfo),
+ Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
+ if (unmapCallback)
+ unmapCallback(virt, size);
+
+ return;
+ }
+ }
+ } else {
+ // blockEndPredecessor will always be unmapped as blocks has to be terminated by an unmapped
+ // chunk
+ if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) {
+ // Move the unmapped block start backwards
+ blockEndPredecessor->virt = virtEnd;
+
+            // The shrunken unmapped block is now our successor rather than our predecessor
+ blockEndSuccessor = blockEndPredecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(blockEndSuccessor,
+ {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})});
+ if (unmapCallback)
+ unmapCallback(virt, size);
+
+ return;
+ }
+ }
+
+ auto blockStartSuccessor{blockEndSuccessor};
+
+ // Walk the block vector to find the start successor as this is more efficient than another
+ // binary search in most scenarios
+ while (std::prev(blockStartSuccessor)->virt >= virt)
+ blockStartSuccessor--;
+
+ // Check that the start successor is either the end block or something in between
+ if (blockStartSuccessor->virt > virtEnd) {
+ UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
+ } else if (blockStartSuccessor->virt == virtEnd) {
+ // We need to create a new block as there are none spare that we would overwrite
+ blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo));
+ } else {
+ // Erase overwritten blocks
+ if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
+ blocks.erase(eraseStart, blockEndSuccessor);
+
+ // Reuse a block that would otherwise be overwritten as a start block
+ blockStartSuccessor->virt = virt;
+ blockStartSuccessor->phys = phys;
+ blockStartSuccessor->extraInfo = extraInfo;
+ }
+
+ if (unmapCallback)
+ unmapCallback(virt, size);
+}
+
+MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
+ VaType virtEnd{virt + size};
+
+ if (virtEnd > vaLimit)
+ UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}",
+ virtEnd, vaLimit);
+
+ auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
+ if (blockEndSuccessor == blocks.begin())
+ UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd);
+
+ auto blockEndPredecessor{std::prev(blockEndSuccessor)};
+
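+    // Walks an iterator backwards to the last block that starts below virt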
+ auto walkBackToPredecessor{[&](auto iter) {
+ while (iter->virt >= virt)
+ iter--;
+
+ return iter;
+ }};
+
+ auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) {
+ auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)};
+ auto blockStartSuccessor{std::next(blockStartPredecessor)};
+
+ auto eraseEnd{[&]() {
+ if (blockStartPredecessor->Unmapped()) {
+ // If the start predecessor is unmapped then we can erase everything in our region
+ // and be done
+ return std::next(unmappedEnd);
+ } else {
+ // Else reuse the end predecessor as the start of our unmapped region then erase all
+ // up to it
+ unmappedEnd->virt = virt;
+ return unmappedEnd;
+ }
+ }()};
+
+ // We can't have two unmapped regions after each other
+ if (eraseEnd != blocks.end() &&
+ (eraseEnd == blockStartSuccessor ||
+ (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped())))
+ UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!");
+
+ blocks.erase(blockStartSuccessor, eraseEnd);
+ }};
+
+ // We can avoid any splitting logic if these are the case
+ if (blockEndPredecessor->Unmapped()) {
+ if (blockEndPredecessor->virt > virt)
+ eraseBlocksWithEndUnmapped(blockEndPredecessor);
+
+ if (unmapCallback)
+ unmapCallback(virt, size);
+
+ return; // The region is unmapped, bail out early
+ } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) {
+ eraseBlocksWithEndUnmapped(blockEndSuccessor);
+
+ if (unmapCallback)
+ unmapCallback(virt, size);
+
+ return; // The region is unmapped here and doesn't need splitting, bail out early
+ } else if (blockEndSuccessor == blocks.end()) {
+ // This should never happen as the end should always follow an unmapped block
+ UNREACHABLE_MSG("Unexpected Memory Manager state!");
+ } else if (blockEndSuccessor->virt != virtEnd) {
+        // No block ends exactly at virtEnd, so we have to add a tail carrying the remainder of
+        // the previous (mapped) block
+ PaType tailPhys{[&]() {
+ if constexpr (PaContigSplit)
+ return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
+ else
+ return blockEndPredecessor->phys;
+ }()};
+
+ if (blockEndPredecessor->virt >= virt) {
+ // If this block's start would be overlapped by the unmap then reuse it as a tail block
+ blockEndPredecessor->virt = virtEnd;
+ blockEndPredecessor->phys = tailPhys;
+
+            // The reused tail block is now our successor rather than our predecessor
+ blockEndSuccessor = blockEndPredecessor--;
+ } else {
+ blocks.insert(blockEndSuccessor,
+ {Block(virt, UnmappedPa, {}),
+ Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
+ if (unmapCallback)
+ unmapCallback(virt, size);
+
+            return; // The previous block starts before our region, so inserting the unmapped
+                    // block and tail completes the unmap
+ }
+ }
+
+ // Walk the block vector to find the start predecessor as this is more efficient than another
+ // binary search in most scenarios
+ auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)};
+ auto blockStartSuccessor{std::next(blockStartPredecessor)};
+
+ if (blockStartSuccessor->virt > virtEnd) {
+ UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
+ } else if (blockStartSuccessor->virt == virtEnd) {
+ // There are no blocks between the start and the end that would let us skip inserting a new
+ // one for head
+
+        // The previous block may be unmapped, if so we don't need to insert an unmapped block
+        // after it
+ if (blockStartPredecessor->Mapped())
+ blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {}));
+ } else if (blockStartPredecessor->Unmapped()) {
+        // The previous block is unmapped and extends over our region, so simply erase every
+        // block up to the end successor
+        blocks.erase(blockStartSuccessor, blockEndSuccessor);
+ } else {
+        // Erase overwritten blocks, skipping the first one as we will reuse it as the unmapped
+        // start block below
+ if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
+ blocks.erase(eraseStart, blockEndSuccessor);
+
+ // Add in the unmapped block header
+ blockStartSuccessor->virt = virt;
+ blockStartSuccessor->phys = UnmappedPa;
+ }
+
+ if (unmapCallback)
+ unmapCallback(virt, size);
+}
+
+ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart, VaType vaLimit)
+ : Base(vaLimit), currentLinearAllocEnd(vaStart), vaStart(vaStart) {}
+
+ALLOC_MEMBER(VaType)::Allocate(VaType size) {
+ std::scoped_lock lock(this->blockMutex);
+
+ VaType allocStart{UnmappedVa};
+ VaType allocEnd{currentLinearAllocEnd + size};
+
+ // Avoid searching backwards in the address space if possible
+ if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) {
+ auto allocEndSuccessor{
+ std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)};
+ if (allocEndSuccessor == this->blocks.begin())
+ UNREACHABLE_MSG("First block in AS map is invalid!");
+
+ auto allocEndPredecessor{std::prev(allocEndSuccessor)};
+ if (allocEndPredecessor->virt <= currentLinearAllocEnd) {
+ allocStart = currentLinearAllocEnd;
+ } else {
+            // Skip over any fixed mappings in front of us
+            while (allocEndSuccessor != this->blocks.end()) {
+                // Take the gap before the successor if it's unmapped and large enough
+                if (allocEndSuccessor->virt - allocEndPredecessor->virt >= size &&
+                    allocEndPredecessor->Unmapped()) {
+                    allocStart = allocEndPredecessor->virt;
+                    break;
+                }
+
+ allocEndPredecessor = allocEndSuccessor++;
+
+ // Use the VA limit to calculate if we can fit in the final block since it has no
+ // successor
+ if (allocEndSuccessor == this->blocks.end()) {
+ allocEnd = allocEndPredecessor->virt + size;
+
+ if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit)
+ allocStart = allocEndPredecessor->virt;
+ }
+ }
+ }
+ }
+
+ if (allocStart != UnmappedVa) {
+ currentLinearAllocEnd = allocStart + size;
+ } else { // If linear allocation overflows the AS then find a gap
+ if (this->blocks.size() <= 2)
+ UNREACHABLE_MSG("Unexpected allocator state!");
+
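+        // Scan the entire AS for the first unmapped gap large enough to fit the allocation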
+ auto searchPredecessor{this->blocks.begin()};
+ auto searchSuccessor{std::next(searchPredecessor)};
+
+ while (searchSuccessor != this->blocks.end() &&
+ (searchSuccessor->virt - searchPredecessor->virt < size ||
+ searchPredecessor->Mapped())) {
+ searchPredecessor = searchSuccessor++;
+ }
+
+ if (searchSuccessor != this->blocks.end())
+ allocStart = searchPredecessor->virt;
+ else
+ return {}; // AS is full
+ }
+
+ this->MapLocked(allocStart, true, size, {});
+ return allocStart;
+}
+
+ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
+ this->Map(virt, true, size);
+}
+
+ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
+ this->Unmap(virt, size);
+}
+} // namespace Common