Diffstat (limited to 'src/common/host_memory.cpp')
 src/common/host_memory.cpp | 538 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 538 insertions(+), 0 deletions(-)
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
new file mode 100644
index 000000000..8bd70abc7
--- /dev/null
+++ b/src/common/host_memory.cpp
@@ -0,0 +1,538 @@
+#ifdef _WIN32
+
+#include <iterator>
+#include <unordered_map>
+#include <boost/icl/separate_interval_set.hpp>
+#include <windows.h>
+#include "common/dynamic_library.h"
+
+#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <cerrno>
+#include <cstring>
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#endif // ^^^ Linux ^^^
+
+#include <mutex>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/host_memory.h"
+#include "common/logging/log.h"
+#include "common/scope_exit.h"
+
+namespace Common {
+
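+// A "fastmem" arena: one physical allocation (the backing memory) that can be
+// mapped any number of times at arbitrary offsets inside a large reserved
+// virtual address range. On Windows this is built on VirtualAlloc2 placeholders
+// and MapViewOfFile3; on Linux, on a memfd mapped with mmap(MAP_FIXED).
+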
+constexpr size_t PageAlignment = 0x1000;  // 4 KiB host page
+constexpr size_t HugePageSize = 0x200000; // 2 MiB huge page
+
+#ifdef _WIN32
+
+// Manually imported for MinGW compatibility
+#ifndef MEM_RESERVE_PLACEHOLDER
+#define MEM_RESERVE_PLACEHOLDER 0x00040000
+#endif
+#ifndef MEM_REPLACE_PLACEHOLDER
+#define MEM_REPLACE_PLACEHOLDER 0x00004000
+#endif
+#ifndef MEM_COALESCE_PLACEHOLDERS
+#define MEM_COALESCE_PLACEHOLDERS 0x00000001
+#endif
+#ifndef MEM_PRESERVE_PLACEHOLDER
+#define MEM_PRESERVE_PLACEHOLDER 0x00000002
+#endif
+
+using PFN_CreateFileMapping2 = _Ret_maybenull_ HANDLE(WINAPI*)(
+ _In_ HANDLE File, _In_opt_ SECURITY_ATTRIBUTES* SecurityAttributes, _In_ ULONG DesiredAccess,
+ _In_ ULONG PageProtection, _In_ ULONG AllocationAttributes, _In_ ULONG64 MaximumSize,
+ _In_opt_ PCWSTR Name,
+ _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
+ _In_ ULONG ParameterCount);
+
+using PFN_VirtualAlloc2 = _Ret_maybenull_ PVOID(WINAPI*)(
+ _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress, _In_ SIZE_T Size,
+ _In_ ULONG AllocationType, _In_ ULONG PageProtection,
+ _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
+ _In_ ULONG ParameterCount);
+
+using PFN_MapViewOfFile3 = _Ret_maybenull_ PVOID(WINAPI*)(
+ _In_ HANDLE FileMapping, _In_opt_ HANDLE Process, _In_opt_ PVOID BaseAddress,
+ _In_ ULONG64 Offset, _In_ SIZE_T ViewSize, _In_ ULONG AllocationType, _In_ ULONG PageProtection,
+ _Inout_updates_opt_(ParameterCount) MEM_EXTENDED_PARAMETER* ExtendedParameters,
+ _In_ ULONG ParameterCount);
+
+using PFN_UnmapViewOfFile2 = BOOL(WINAPI*)(_In_ HANDLE Process, _In_ PVOID BaseAddress,
+ _In_ ULONG UnmapFlags);
+
+template <typename T>
+static void GetFuncAddress(Common::DynamicLibrary& dll, const char* name, T& pfn) {
+ if (!dll.GetSymbol(name, &pfn)) {
+ LOG_CRITICAL(HW_Memory, "Failed to load {}", name);
+ throw std::bad_alloc{};
+ }
+}
+
+class HostMemory::Impl {
+public:
+ explicit Impl(size_t backing_size_, size_t virtual_size_)
+ : backing_size{backing_size_}, virtual_size{virtual_size_}, process{GetCurrentProcess()},
+ kernelbase_dll("Kernelbase") {
+ if (!kernelbase_dll.IsOpen()) {
+ LOG_CRITICAL(HW_Memory, "Failed to load Kernelbase.dll");
+ throw std::bad_alloc{};
+ }
+ GetFuncAddress(kernelbase_dll, "CreateFileMapping2", pfn_CreateFileMapping2);
+ GetFuncAddress(kernelbase_dll, "VirtualAlloc2", pfn_VirtualAlloc2);
+ GetFuncAddress(kernelbase_dll, "MapViewOfFile3", pfn_MapViewOfFile3);
+ GetFuncAddress(kernelbase_dll, "UnmapViewOfFile2", pfn_UnmapViewOfFile2);
+
+ // Allocate backing file map
+ backing_handle =
+ pfn_CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_WRITE | FILE_MAP_READ,
+ PAGE_READWRITE, SEC_COMMIT, backing_size, nullptr, nullptr, 0);
+ if (!backing_handle) {
+ LOG_CRITICAL(HW_Memory, "Failed to allocate {} MiB of backing memory",
+ backing_size >> 20);
+ throw std::bad_alloc{};
+ }
+        // Reserve virtual memory for the backing file map as a placeholder
+ backing_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, backing_size,
+ MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
+ PAGE_NOACCESS, nullptr, 0));
+ if (!backing_base) {
+ Release();
+ LOG_CRITICAL(HW_Memory, "Failed to reserve {} MiB of virtual memory",
+ backing_size >> 20);
+ throw std::bad_alloc{};
+ }
+ // Map backing placeholder
+ void* const ret = pfn_MapViewOfFile3(backing_handle, process, backing_base, 0, backing_size,
+ MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
+ if (ret != backing_base) {
+ Release();
+ LOG_CRITICAL(HW_Memory, "Failed to map {} MiB of virtual memory", backing_size >> 20);
+ throw std::bad_alloc{};
+ }
+ // Allocate virtual address placeholder
+ virtual_base = static_cast<u8*>(pfn_VirtualAlloc2(process, nullptr, virtual_size,
+ MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
+ PAGE_NOACCESS, nullptr, 0));
+ if (!virtual_base) {
+ Release();
+ LOG_CRITICAL(HW_Memory, "Failed to reserve {} GiB of virtual memory",
+ virtual_size >> 30);
+ throw std::bad_alloc{};
+ }
+ }
+
+ ~Impl() {
+ Release();
+ }
+
+ void Map(size_t virtual_offset, size_t host_offset, size_t length) {
+ std::unique_lock lock{placeholder_mutex};
+        if (!IsNichePlaceholder(virtual_offset, length)) {
+ Split(virtual_offset, length);
+ }
+ ASSERT(placeholders.find({virtual_offset, virtual_offset + length}) == placeholders.end());
+ TrackPlaceholder(virtual_offset, host_offset, length);
+
+ MapView(virtual_offset, host_offset, length);
+ }
+
+ void Unmap(size_t virtual_offset, size_t length) {
+ std::lock_guard lock{placeholder_mutex};
+
+ // Unmap until there are no more placeholders
+ while (UnmapOnePlaceholder(virtual_offset, length)) {
+ }
+ }
+
+ void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
+ DWORD new_flags{};
+ if (read && write) {
+ new_flags = PAGE_READWRITE;
+ } else if (read && !write) {
+ new_flags = PAGE_READONLY;
+ } else if (!read && !write) {
+ new_flags = PAGE_NOACCESS;
+ } else {
+ UNIMPLEMENTED_MSG("Protection flag combination read={} write={}", read, write);
+ }
+ const size_t virtual_end = virtual_offset + length;
+
+ std::lock_guard lock{placeholder_mutex};
+ auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
+ while (it != end) {
+ const size_t offset = std::max(it->lower(), virtual_offset);
+ const size_t protect_length = std::min(it->upper(), virtual_end) - offset;
+ DWORD old_flags{};
+ if (!VirtualProtect(virtual_base + offset, protect_length, new_flags, &old_flags)) {
+ LOG_CRITICAL(HW_Memory, "Failed to change virtual memory protect rules");
+ }
+ ++it;
+ }
+ }
+
+ const size_t backing_size; ///< Size of the backing memory in bytes
+ const size_t virtual_size; ///< Size of the virtual address placeholder in bytes
+
+ u8* backing_base{};
+ u8* virtual_base{};
+
+private:
+ /// Release all resources in the object
+ void Release() {
+ if (!placeholders.empty()) {
+ for (const auto& placeholder : placeholders) {
+ if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder.lower(),
+ MEM_PRESERVE_PLACEHOLDER)) {
+ LOG_CRITICAL(HW_Memory, "Failed to unmap virtual memory placeholder");
+ }
+ }
+ Coalesce(0, virtual_size);
+ }
+ if (virtual_base) {
+ if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
+ LOG_CRITICAL(HW_Memory, "Failed to free virtual memory");
+ }
+ }
+ if (backing_base) {
+ if (!pfn_UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
+ LOG_CRITICAL(HW_Memory, "Failed to unmap backing memory placeholder");
+ }
+ if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
+ LOG_CRITICAL(HW_Memory, "Failed to free backing memory");
+ }
+ }
+ if (!CloseHandle(backing_handle)) {
+ LOG_CRITICAL(HW_Memory, "Failed to free backing memory file handle");
+ }
+ }
+
+    /// Unmap one placeholder in the given range (partial unmaps are supported)
+    /// Return true when a placeholder was unmapped, false when none were left in the range
+ bool UnmapOnePlaceholder(size_t virtual_offset, size_t length) {
+ const auto it = placeholders.find({virtual_offset, virtual_offset + length});
+ const auto begin = placeholders.begin();
+ const auto end = placeholders.end();
+ if (it == end) {
+ return false;
+ }
+ const size_t placeholder_begin = it->lower();
+ const size_t placeholder_end = it->upper();
+ const size_t unmap_begin = std::max(virtual_offset, placeholder_begin);
+ const size_t unmap_end = std::min(virtual_offset + length, placeholder_end);
+ ASSERT(unmap_begin >= placeholder_begin && unmap_begin < placeholder_end);
+ ASSERT(unmap_end <= placeholder_end && unmap_end > placeholder_begin);
+
+ const auto host_pointer_it = placeholder_host_pointers.find(placeholder_begin);
+ ASSERT(host_pointer_it != placeholder_host_pointers.end());
+ const size_t host_offset = host_pointer_it->second;
+
+ const bool split_left = unmap_begin > placeholder_begin;
+ const bool split_right = unmap_end < placeholder_end;
+
+ if (!pfn_UnmapViewOfFile2(process, virtual_base + placeholder_begin,
+ MEM_PRESERVE_PLACEHOLDER)) {
+ LOG_CRITICAL(HW_Memory, "Failed to unmap placeholder");
+ }
+ // If we have to remap memory regions due to partial unmaps, we are in a data race as
+ // Windows doesn't support remapping memory without unmapping first. Avoid adding any extra
+ // logic within the panic region described below.
+
+ // Panic region, we are in a data race right now
+ if (split_left || split_right) {
+ Split(unmap_begin, unmap_end - unmap_begin);
+ }
+ if (split_left) {
+ MapView(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
+ }
+ if (split_right) {
+ MapView(unmap_end, host_offset + unmap_end - placeholder_begin,
+ placeholder_end - unmap_end);
+ }
+ // End panic region
+
+ size_t coalesce_begin = unmap_begin;
+ if (!split_left) {
+ // Try to coalesce pages to the left
+ coalesce_begin = it == begin ? 0 : std::prev(it)->upper();
+ if (coalesce_begin != placeholder_begin) {
+ Coalesce(coalesce_begin, unmap_end - coalesce_begin);
+ }
+ }
+ if (!split_right) {
+ // Try to coalesce pages to the right
+ const auto next = std::next(it);
+ const size_t next_begin = next == end ? virtual_size : next->lower();
+ if (placeholder_end != next_begin) {
+ // We can coalesce to the right
+ Coalesce(coalesce_begin, next_begin - coalesce_begin);
+ }
+ }
+ // Remove and reinsert placeholder trackers
+ UntrackPlaceholder(it);
+ if (split_left) {
+ TrackPlaceholder(placeholder_begin, host_offset, unmap_begin - placeholder_begin);
+ }
+ if (split_right) {
+ TrackPlaceholder(unmap_end, host_offset + unmap_end - placeholder_begin,
+ placeholder_end - unmap_end);
+ }
+ return true;
+ }
+
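+    /// Map a view of the backing file over a placeholder at the given virtual offset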
+ void MapView(size_t virtual_offset, size_t host_offset, size_t length) {
+ if (!pfn_MapViewOfFile3(backing_handle, process, virtual_base + virtual_offset, host_offset,
+ length, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0)) {
+ LOG_CRITICAL(HW_Memory, "Failed to map placeholder");
+ }
+ }
+
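+    /// Split a placeholder at the given range; releasing with MEM_PRESERVE_PLACEHOLDER
+    /// keeps the range reserved as a placeholder of its own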
+ void Split(size_t virtual_offset, size_t length) {
+ if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
+ MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
+ LOG_CRITICAL(HW_Memory, "Failed to split placeholder");
+ }
+ }
+
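+    /// Merge contiguous placeholders in the given range back into a single placeholder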
+ void Coalesce(size_t virtual_offset, size_t length) {
+ if (!VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_base + virtual_offset), length,
+ MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
+ LOG_CRITICAL(HW_Memory, "Failed to coalesce placeholders");
+ }
+ }
+
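+    /// Track a mapped placeholder together with its backing memory offset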
+ void TrackPlaceholder(size_t virtual_offset, size_t host_offset, size_t length) {
+ placeholders.insert({virtual_offset, virtual_offset + length});
+ placeholder_host_pointers.emplace(virtual_offset, host_offset);
+ }
+
+    void UntrackPlaceholder(boost::icl::separate_interval_set<size_t>::iterator it) {
+        // Erase the host pointer entry before erasing the iterator that owns the key
+        placeholder_host_pointers.erase(it->lower());
+        placeholders.erase(it);
+    }
+
+    /// Return true when a given memory region is a "niche" and the placeholders don't have to be
+    /// split.
+    bool IsNichePlaceholder(size_t virtual_offset, size_t length) const {
+        const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
+        if (it != placeholders.end() && it->lower() == virtual_offset + length) {
+            if (it == placeholders.begin()) {
+                return virtual_offset == 0;
+            }
+            return std::prev(it)->upper() == virtual_offset;
+        }
+        return false;
+    }
+
+ HANDLE process{}; ///< Current process handle
+ HANDLE backing_handle{}; ///< File based backing memory
+
+ DynamicLibrary kernelbase_dll;
+ PFN_CreateFileMapping2 pfn_CreateFileMapping2{};
+ PFN_VirtualAlloc2 pfn_VirtualAlloc2{};
+ PFN_MapViewOfFile3 pfn_MapViewOfFile3{};
+ PFN_UnmapViewOfFile2 pfn_UnmapViewOfFile2{};
+
+ std::mutex placeholder_mutex; ///< Mutex for placeholders
+ boost::icl::separate_interval_set<size_t> placeholders; ///< Mapped placeholders
+ std::unordered_map<size_t, size_t> placeholder_host_pointers; ///< Placeholder backing offset
+};
+
+#elif defined(__linux__) // ^^^ Windows ^^^ vvv Linux vvv
+
+class HostMemory::Impl {
+public:
+ explicit Impl(size_t backing_size_, size_t virtual_size_)
+ : backing_size{backing_size_}, virtual_size{virtual_size_} {
+ bool good = false;
+ SCOPE_EXIT({
+ if (!good) {
+ Release();
+ }
+ });
+
+ // Backing memory initialization
+ fd = memfd_create("HostMemory", 0);
+ if (fd == -1) {
+ LOG_CRITICAL(HW_Memory, "memfd_create failed: {}", strerror(errno));
+ throw std::bad_alloc{};
+ }
+
+        // ftruncate is defined to extend the file with zeros
+        int ret = ftruncate(fd, backing_size);
+        if (ret != 0) {
+            LOG_CRITICAL(HW_Memory, "ftruncate failed with {}, are you out of memory?",
+                         strerror(errno));
+ throw std::bad_alloc{};
+ }
+
+ backing_base = static_cast<u8*>(
+ mmap(nullptr, backing_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
+ if (backing_base == MAP_FAILED) {
+ LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
+ throw std::bad_alloc{};
+ }
+
+ // Virtual memory initialization
+ virtual_base = static_cast<u8*>(
+ mmap(nullptr, virtual_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+ if (virtual_base == MAP_FAILED) {
+ LOG_CRITICAL(HW_Memory, "mmap failed: {}", strerror(errno));
+ throw std::bad_alloc{};
+ }
+
+ good = true;
+ }
+
+ ~Impl() {
+ Release();
+ }
+
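+    /// Alias a slice of the backing memory at the given virtual offset. MAP_FIXED is
+    /// safe here because the target range lies inside our own PROT_NONE reservation.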
+    void Map(size_t virtual_offset, size_t host_offset, size_t length) {
+ void* ret = mmap(virtual_base + virtual_offset, length, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, host_offset);
+ ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
+ }
+
+    void Unmap(size_t virtual_offset, size_t length) {
+        // Despite the name, this still operates on the virtual range, and the memory is
+        // not released: the mapping is replaced by an inaccessible anonymous mapping so
+        // the address range stays reserved.
+
+ void* ret = mmap(virtual_base + virtual_offset, length, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
+ }
+
+ void Protect(size_t virtual_offset, size_t length, bool read, bool write) {
+ int flags = 0;
+ if (read) {
+ flags |= PROT_READ;
+ }
+ if (write) {
+ flags |= PROT_WRITE;
+ }
+ int ret = mprotect(virtual_base + virtual_offset, length, flags);
+ ASSERT_MSG(ret == 0, "mprotect failed: {}", strerror(errno));
+ }
+
+ const size_t backing_size; ///< Size of the backing memory in bytes
+ const size_t virtual_size; ///< Size of the virtual address placeholder in bytes
+
+ u8* backing_base{reinterpret_cast<u8*>(MAP_FAILED)};
+ u8* virtual_base{reinterpret_cast<u8*>(MAP_FAILED)};
+
+private:
+ /// Release all resources in the object
+ void Release() {
+ if (virtual_base != MAP_FAILED) {
+ int ret = munmap(virtual_base, virtual_size);
+ ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
+ }
+
+ if (backing_base != MAP_FAILED) {
+ int ret = munmap(backing_base, backing_size);
+ ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
+ }
+
+ if (fd != -1) {
+ int ret = close(fd);
+ ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
+ }
+ }
+
+ int fd{-1}; // memfd file descriptor, -1 is the error value of memfd_create
+};
+
+#else // ^^^ Linux ^^^ vvv Generic vvv
+
+class HostMemory::Impl {
+public:
+    explicit Impl(size_t /*backing_size*/, size_t /*virtual_size*/) {
+        // This is just a placeholder.
+        // Please implement fastmem in a proper way on your platform.
+        throw std::bad_alloc{};
+ }
+
+ void Map(size_t virtual_offset, size_t host_offset, size_t length) {}
+
+ void Unmap(size_t virtual_offset, size_t length) {}
+
+ void Protect(size_t virtual_offset, size_t length, bool read, bool write) {}
+
+ u8* backing_base{nullptr};
+ u8* virtual_base{nullptr};
+};
+
+#endif // ^^^ Generic ^^^
+
+HostMemory::HostMemory(size_t backing_size_, size_t virtual_size_)
+ : backing_size(backing_size_), virtual_size(virtual_size_) {
+ try {
+ // Try to allocate a fastmem arena.
+ // The implementation will fail with std::bad_alloc on errors.
+ impl = std::make_unique<HostMemory::Impl>(AlignUp(backing_size, PageAlignment),
+ AlignUp(virtual_size, PageAlignment) +
+ 3 * HugePageSize);
+ backing_base = impl->backing_base;
+ virtual_base = impl->virtual_base;
+
+ if (virtual_base) {
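+            // The virtual range was over-allocated by 3 huge pages (see above), so the
+            // usable base can be aligned up to a 2 MiB boundary while keeping at least
+            // one huge page of slack on either side.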
+ virtual_base += 2 * HugePageSize - 1;
+ virtual_base -= reinterpret_cast<size_t>(virtual_base) & (HugePageSize - 1);
+ virtual_base_offset = virtual_base - impl->virtual_base;
+ }
+
+ } catch (const std::bad_alloc&) {
+ LOG_CRITICAL(HW_Memory,
+ "Fastmem unavailable, falling back to VirtualBuffer for memory allocation");
+ fallback_buffer = std::make_unique<Common::VirtualBuffer<u8>>(backing_size);
+ backing_base = fallback_buffer->data();
+ virtual_base = nullptr;
+ }
+}
+
+HostMemory::~HostMemory() = default;
+
+HostMemory::HostMemory(HostMemory&&) noexcept = default;
+
+HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
+
+void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length) {
+ ASSERT(virtual_offset % PageAlignment == 0);
+ ASSERT(host_offset % PageAlignment == 0);
+ ASSERT(length % PageAlignment == 0);
+ ASSERT(virtual_offset + length <= virtual_size);
+ ASSERT(host_offset + length <= backing_size);
+ if (length == 0 || !virtual_base || !impl) {
+ return;
+ }
+ impl->Map(virtual_offset + virtual_base_offset, host_offset, length);
+}
+
+void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+ ASSERT(virtual_offset % PageAlignment == 0);
+ ASSERT(length % PageAlignment == 0);
+ ASSERT(virtual_offset + length <= virtual_size);
+ if (length == 0 || !virtual_base || !impl) {
+ return;
+ }
+ impl->Unmap(virtual_offset + virtual_base_offset, length);
+}
+
+void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write) {
+ ASSERT(virtual_offset % PageAlignment == 0);
+ ASSERT(length % PageAlignment == 0);
+ ASSERT(virtual_offset + length <= virtual_size);
+ if (length == 0 || !virtual_base || !impl) {
+ return;
+ }
+ impl->Protect(virtual_offset + virtual_base_offset, length, read, write);
+}
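+
+// Illustrative usage (hypothetical sizes; all offsets must be page-aligned):
+//
+//   Common::HostMemory mem(4ULL << 30, 1ULL << 38); // 4 GiB backing, 256 GiB virtual
+//   mem.Map(0x1000, 0x2000, 0x3000);          // alias host [0x2000, 0x5000) at 0x1000
+//   mem.Protect(0x1000, 0x1000, true, false); // make the first page read-only
+//   mem.Unmap(0x1000, 0x3000);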
+
+} // namespace Common