path: root/src/core/hle
author    liamwhite <liamwhite@users.noreply.github.com>  2024-01-22 16:55:39 +0100
committer GitHub <noreply@github.com>                     2024-01-22 16:55:39 +0100
commit    8bd10473d60503c7acddc399604a51b9c9947541 (patch)
tree      f713f84942681321fca27ba028e31d6c74a09013 /src/core/hle
parent    Merge pull request #12747 from t895/homescreen-widget (diff)
parent    device_memory_manager: use unique_lock for update (diff)
Diffstat (limited to '')
-rw-r--r--  src/core/hle/kernel/k_process.cpp                            |  14
-rw-r--r--  src/core/hle/kernel/k_process.h                              |   4
-rw-r--r--  src/core/hle/service/hle_ipc.cpp                             |  61
-rw-r--r--  src/core/hle/service/hle_ipc.h                               |   5
-rw-r--r--  src/core/hle/service/nvdrv/core/container.cpp                | 114
-rw-r--r--  src/core/hle/service/nvdrv/core/container.h                  |  32
-rw-r--r--  src/core/hle/service/nvdrv/core/heap_mapper.cpp              | 175
-rw-r--r--  src/core/hle/service/nvdrv/core/heap_mapper.h                |  49
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp                    | 120
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h                      |  25
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdevice.h                |   3
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp          |   4
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h            |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp         |  36
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h           |  15
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp           |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h             |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp       |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h         |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp            |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h              |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp          |   9
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.h            |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp   |  13
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h     |   5
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp          |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h            |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp            |   7
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.h              |   2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp                 |  31
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h                   |   7
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp                         |  27
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.h                           |   6
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.cpp               |  34
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.h                 |   1
-rw-r--r--  src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp  |  25
-rw-r--r--  src/core/hle/service/nvnflinger/fb_share_buffer_manager.h    |   5
-rw-r--r--  src/core/hle/service/nvnflinger/nvnflinger.cpp               |   2
-rw-r--r--  src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp        |   2
39 files changed, 657 insertions, 196 deletions
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 53735a225..0b08e877e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -5,6 +5,7 @@
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
+#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_shared_memory.h"
@@ -320,7 +321,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPa
// Ensure our memory is initialized.
m_memory.SetCurrentPageTable(*this);
- m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+ m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
// Ensure we can insert the code region.
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
@@ -417,7 +418,7 @@ Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
// Ensure our memory is initialized.
m_memory.SetCurrentPageTable(*this);
- m_memory.SetGPUDirtyManagers(m_dirty_memory_managers);
+ m_memory.SetGPUDirtyManagers(m_kernel.System().GetGPUDirtyMemoryManager());
// Ensure we can insert the code region.
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
@@ -1141,8 +1142,7 @@ void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
KProcess::KProcess(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel}, m_state_lock{kernel},
m_list_lock{kernel}, m_cond_var{kernel.System()}, m_address_arbiter{kernel.System()},
- m_handle_table{kernel}, m_dirty_memory_managers{},
- m_exclusive_monitor{}, m_memory{kernel.System()} {}
+ m_handle_table{kernel}, m_exclusive_monitor{}, m_memory{kernel.System()} {}
KProcess::~KProcess() = default;
Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
@@ -1324,10 +1324,4 @@ bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointT
return true;
}
-void KProcess::GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback) {
- for (auto& manager : m_dirty_memory_managers) {
- manager.Gather(callback);
- }
-}
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 53c0e3316..ab1358a12 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -7,7 +7,6 @@
#include "core/arm/arm_interface.h"
#include "core/file_sys/program_metadata.h"
-#include "core/gpu_dirty_memory_manager.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_address_arbiter.h"
#include "core/hle/kernel/k_capabilities.h"
@@ -128,7 +127,6 @@ private:
#ifdef HAS_NCE
std::unordered_map<u64, u64> m_post_handlers{};
#endif
- std::array<Core::GPUDirtyMemoryManager, Core::Hardware::NUM_CPU_CORES> m_dirty_memory_managers;
std::unique_ptr<Core::ExclusiveMonitor> m_exclusive_monitor;
Core::Memory::Memory m_memory;
@@ -511,8 +509,6 @@ public:
return m_memory;
}
- void GatherGPUDirtyMemory(std::function<void(VAddr, size_t)>& callback);
-
Core::ExclusiveMonitor& GetExclusiveMonitor() const {
return *m_exclusive_monitor;
}
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index 3f38ceb03..e491dd260 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -12,6 +12,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
@@ -23,19 +24,6 @@
#include "core/hle/service/ipc_helpers.h"
#include "core/memory.h"
-namespace {
-static thread_local std::array read_buffer_data_a{
- Common::ScratchBuffer<u8>(),
- Common::ScratchBuffer<u8>(),
- Common::ScratchBuffer<u8>(),
-};
-static thread_local std::array read_buffer_data_x{
- Common::ScratchBuffer<u8>(),
- Common::ScratchBuffer<u8>(),
- Common::ScratchBuffer<u8>(),
-};
-} // Anonymous namespace
-
namespace Service {
SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -343,48 +331,27 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
}
std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
- static thread_local std::array read_buffer_a{
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- };
+ Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_a[buffer_index];
- return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
- BufferDescriptorA()[buffer_index].Size(),
- &read_buffer_data_a[buffer_index]);
+ return gm.Read(BufferDescriptorA()[buffer_index].Address(),
+ BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
}
std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
- static thread_local std::array read_buffer_x{
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- };
+ Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_x[buffer_index];
- return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
- BufferDescriptorX()[buffer_index].Size(),
- &read_buffer_data_x[buffer_index]);
+ return gm.Read(BufferDescriptorX()[buffer_index].Address(),
+ BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
}
std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
- static thread_local std::array read_buffer_a{
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- };
- static thread_local std::array read_buffer_x{
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
- };
+ Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::UnsafeRead> gm(memory, 0, 0);
const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
BufferDescriptorA()[buffer_index].Size()};
@@ -401,18 +368,14 @@ std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) cons
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorA().size() > buffer_index, { return {}; },
"BufferDescriptorA invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_a[buffer_index];
- return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
- BufferDescriptorA()[buffer_index].Size(),
- &read_buffer_data_a[buffer_index]);
+ return gm.Read(BufferDescriptorA()[buffer_index].Address(),
+ BufferDescriptorA()[buffer_index].Size(), &read_buffer_data_a[buffer_index]);
} else {
ASSERT_OR_EXECUTE_MSG(
BufferDescriptorX().size() > buffer_index, { return {}; },
"BufferDescriptorX invalid buffer_index {}", buffer_index);
- auto& read_buffer = read_buffer_x[buffer_index];
- return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
- BufferDescriptorX()[buffer_index].Size(),
- &read_buffer_data_x[buffer_index]);
+ return gm.Read(BufferDescriptorX()[buffer_index].Address(),
+ BufferDescriptorX()[buffer_index].Size(), &read_buffer_data_x[buffer_index]);
}
}
diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h
index 440737db5..8329d7265 100644
--- a/src/core/hle/service/hle_ipc.h
+++ b/src/core/hle/service/hle_ipc.h
@@ -41,6 +41,8 @@ class KernelCore;
class KHandleTable;
class KProcess;
class KServerSession;
+template <typename T>
+class KScopedAutoObject;
class KThread;
} // namespace Kernel
@@ -424,6 +426,9 @@ private:
Kernel::KernelCore& kernel;
Core::Memory::Memory& memory;
+
+ mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_a{};
+ mutable std::array<Common::ScratchBuffer<u8>, 3> read_buffer_data_x{};
};
} // namespace Service
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
index 37ca24f5d..21ef57d27 100644
--- a/src/core/hle/service/nvdrv/core/container.cpp
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -2,27 +2,135 @@
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later
+#include <atomic>
+#include <deque>
+#include <mutex>
+
+#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/memory.h"
#include "video_core/host1x/host1x.h"
namespace Service::Nvidia::NvCore {
+Session::Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_)
+ : id{id_}, process{process_}, asid{asid_}, has_preallocated_area{}, mapper{}, is_active{} {}
+
+Session::~Session() = default;
+
struct ContainerImpl {
- explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
- : file{host1x_}, manager{host1x_}, device_file_data{} {}
+ explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
+ : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
+ Tegra::Host1x::Host1x& host1x;
NvMap file;
SyncpointManager manager;
Container::Host1xDeviceFileData device_file_data;
+ std::deque<Session> sessions;
+ size_t new_ids{};
+ std::deque<size_t> id_pool;
+ std::mutex session_guard;
};
Container::Container(Tegra::Host1x::Host1x& host1x_) {
- impl = std::make_unique<ContainerImpl>(host1x_);
+ impl = std::make_unique<ContainerImpl>(*this, host1x_);
}
Container::~Container() = default;
+SessionId Container::OpenSession(Kernel::KProcess* process) {
+ using namespace Common::Literals;
+
+ std::scoped_lock lk(impl->session_guard);
+ for (auto& session : impl->sessions) {
+ if (!session.is_active) {
+ continue;
+ }
+ if (session.process == process) {
+ return session.id;
+ }
+ }
+ size_t new_id{};
+ auto* memory_interface = &process->GetMemory();
+ auto& smmu = impl->host1x.MemoryManager();
+ auto asid = smmu.RegisterProcess(memory_interface);
+ if (!impl->id_pool.empty()) {
+ new_id = impl->id_pool.front();
+ impl->id_pool.pop_front();
+ impl->sessions[new_id] = Session{SessionId{new_id}, process, asid};
+ } else {
+ new_id = impl->new_ids++;
+ impl->sessions.emplace_back(SessionId{new_id}, process, asid);
+ }
+ auto& session = impl->sessions[new_id];
+ session.is_active = true;
+ // Optimization
+ if (process->IsApplication()) {
+ auto& page_table = process->GetPageTable().GetBasePageTable();
+ auto heap_start = page_table.GetHeapRegionStart();
+
+ Kernel::KProcessAddress cur_addr = heap_start;
+ size_t region_size = 0;
+ VAddr region_start = 0;
+ while (true) {
+ Kernel::KMemoryInfo mem_info{};
+ Kernel::Svc::PageInfo page_info{};
+ R_ASSERT(page_table.QueryInfo(std::addressof(mem_info), std::addressof(page_info),
+ cur_addr));
+ auto svc_mem_info = mem_info.GetSvcMemoryInfo();
+
+ // Check if this memory block is heap.
+ if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) {
+ if (svc_mem_info.size > region_size) {
+ region_size = svc_mem_info.size;
+ region_start = svc_mem_info.base_address;
+ }
+ }
+
+ // Check if we're done.
+ const uintptr_t next_address = svc_mem_info.base_address + svc_mem_info.size;
+ if (next_address <= GetInteger(cur_addr)) {
+ break;
+ }
+
+ cur_addr = next_address;
+ }
+ session.has_preallocated_area = false;
+ auto start_region = region_size >= 32_MiB ? smmu.Allocate(region_size) : 0;
+ if (start_region != 0) {
+ session.mapper = std::make_unique<HeapMapper>(region_start, start_region, region_size,
+ asid, impl->host1x);
+ smmu.TrackContinuity(start_region, region_start, region_size, asid);
+ session.has_preallocated_area = true;
+ LOG_DEBUG(Debug, "Preallocation created!");
+ }
+ }
+ return SessionId{new_id};
+}
+
+void Container::CloseSession(SessionId session_id) {
+ std::scoped_lock lk(impl->session_guard);
+ auto& session = impl->sessions[session_id.id];
+ auto& smmu = impl->host1x.MemoryManager();
+ if (session.has_preallocated_area) {
+ const DAddr region_start = session.mapper->GetRegionStart();
+ const size_t region_size = session.mapper->GetRegionSize();
+ session.mapper.reset();
+ smmu.Free(region_start, region_size);
+ session.has_preallocated_area = false;
+ }
+ session.is_active = false;
+ smmu.UnregisterProcess(impl->sessions[session_id.id].asid);
+ impl->id_pool.emplace_front(session_id.id);
+}
+
+Session* Container::GetSession(SessionId session_id) {
+ std::atomic_thread_fence(std::memory_order_acquire);
+ return &impl->sessions[session_id.id];
+}
+
NvMap& Container::GetNvMapFile() {
return impl->file;
}
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
index b4b63ac90..b4d3938a8 100644
--- a/src/core/hle/service/nvdrv/core/container.h
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -8,24 +8,56 @@
#include <memory>
#include <unordered_map>
+#include "core/device_memory_manager.h"
#include "core/hle/service/nvdrv/nvdata.h"
+namespace Kernel {
+class KProcess;
+}
+
namespace Tegra::Host1x {
class Host1x;
} // namespace Tegra::Host1x
namespace Service::Nvidia::NvCore {
+class HeapMapper;
class NvMap;
class SyncpointManager;
struct ContainerImpl;
+struct SessionId {
+ size_t id;
+};
+
+struct Session {
+ Session(SessionId id_, Kernel::KProcess* process_, Core::Asid asid_);
+ ~Session();
+
+ Session(const Session&) = delete;
+ Session& operator=(const Session&) = delete;
+ Session(Session&&) = default;
+ Session& operator=(Session&&) = default;
+
+ SessionId id;
+ Kernel::KProcess* process;
+ Core::Asid asid;
+ bool has_preallocated_area{};
+ std::unique_ptr<HeapMapper> mapper{};
+ bool is_active{};
+};
+
class Container {
public:
explicit Container(Tegra::Host1x::Host1x& host1x);
~Container();
+ SessionId OpenSession(Kernel::KProcess* process);
+ void CloseSession(SessionId id);
+
+ Session* GetSession(SessionId id);
+
NvMap& GetNvMapFile();
const NvMap& GetNvMapFile() const;
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
new file mode 100644
index 000000000..096dc5deb
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
@@ -0,0 +1,175 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <mutex>
+
+#include <boost/container/small_vector.hpp>
+#define BOOST_NO_MT
+#include <boost/pool/detail/mutex.hpp>
+#undef BOOST_NO_MT
+#include <boost/icl/interval.hpp>
+#include <boost/icl/interval_base_set.hpp>
+#include <boost/icl/interval_set.hpp>
+#include <boost/icl/split_interval_map.hpp>
+#include <boost/pool/pool.hpp>
+#include <boost/pool/pool_alloc.hpp>
+#include <boost/pool/poolfwd.hpp>
+
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
+#include "video_core/host1x/host1x.h"
+
+namespace boost {
+template <typename T>
+class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
+}
+
+namespace Service::Nvidia::NvCore {
+
+using IntervalCompare = std::less<DAddr>;
+using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
+using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
+using IntervalSet = boost::icl::interval_set<DAddr>;
+using IntervalType = typename IntervalSet::interval_type;
+
+template <typename Type>
+struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
+ // types
+ typedef counter_add_functor<Type> type;
+ typedef boost::icl::identity_based_inplace_combine<Type> base_type;
+
+ // public member functions
+ void operator()(Type& current, const Type& added) const {
+ current += added;
+ if (current < base_type::identity_element()) {
+ current = base_type::identity_element();
+ }
+ }
+
+ // public static functions
+ static void version(Type&){};
+};
+
+using OverlapCombine = counter_add_functor<int>;
+using OverlapSection = boost::icl::inter_section<int>;
+using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
+
+struct HeapMapper::HeapMapperInternal {
+ HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
+ ~HeapMapperInternal() = default;
+
+ template <typename Func>
+ void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
+ Func&& func) {
+ const DAddr start_address = cpu_addr;
+ const DAddr end_address = start_address + size;
+ const IntervalType search_interval{start_address, end_address};
+ auto it = current_range.lower_bound(search_interval);
+ if (it == current_range.end()) {
+ return;
+ }
+ auto end_it = current_range.upper_bound(search_interval);
+ for (; it != end_it; it++) {
+ auto& inter = it->first;
+ DAddr inter_addr_end = inter.upper();
+ DAddr inter_addr = inter.lower();
+ if (inter_addr_end > end_address) {
+ inter_addr_end = end_address;
+ }
+ if (inter_addr < start_address) {
+ inter_addr = start_address;
+ }
+ func(inter_addr, inter_addr_end, it->second);
+ }
+ }
+
+ void RemoveEachInOverlapCounter(OverlapCounter& current_range,
+ const IntervalType search_interval, int subtract_value) {
+ bool any_removals = false;
+ current_range.add(std::make_pair(search_interval, subtract_value));
+ do {
+ any_removals = false;
+ auto it = current_range.lower_bound(search_interval);
+ if (it == current_range.end()) {
+ return;
+ }
+ auto end_it = current_range.upper_bound(search_interval);
+ for (; it != end_it; it++) {
+ if (it->second <= 0) {
+ any_removals = true;
+ current_range.erase(it);
+ break;
+ }
+ }
+ } while (any_removals);
+ }
+
+ IntervalSet base_set;
+ OverlapCounter mapping_overlaps;
+ Tegra::MaxwellDeviceMemoryManager& device_memory;
+ std::mutex guard;
+};
+
+HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
+ Tegra::Host1x::Host1x& host1x)
+ : m_vaddress{start_vaddress}, m_daddress{start_daddress}, m_size{size}, m_asid{asid} {
+ m_internal = std::make_unique<HeapMapperInternal>(host1x);
+}
+
+HeapMapper::~HeapMapper() {
+ m_internal->device_memory.Unmap(m_daddress, m_size);
+}
+
+DAddr HeapMapper::Map(VAddr start, size_t size) {
+ std::scoped_lock lk(m_internal->guard);
+ m_internal->base_set.clear();
+ const IntervalType interval{start, start + size};
+ m_internal->base_set.insert(interval);
+ m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+ [this](VAddr start_addr, VAddr end_addr, int) {
+ const IntervalType other{start_addr, end_addr};
+ m_internal->base_set.subtract(other);
+ });
+ if (!m_internal->base_set.empty()) {
+ auto it = m_internal->base_set.begin();
+ auto end_it = m_internal->base_set.end();
+ for (; it != end_it; it++) {
+ const VAddr inter_addr_end = it->upper();
+ const VAddr inter_addr = it->lower();
+ const size_t offset = inter_addr - m_vaddress;
+ const size_t sub_size = inter_addr_end - inter_addr;
+ m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
+ m_asid);
+ }
+ }
+ m_internal->mapping_overlaps += std::make_pair(interval, 1);
+ m_internal->base_set.clear();
+ return m_daddress + (start - m_vaddress);
+}
+
+void HeapMapper::Unmap(VAddr start, size_t size) {
+ std::scoped_lock lk(m_internal->guard);
+ m_internal->base_set.clear();
+ m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+ [this](VAddr start_addr, VAddr end_addr, int value) {
+ if (value <= 1) {
+ const IntervalType other{start_addr, end_addr};
+ m_internal->base_set.insert(other);
+ }
+ });
+ if (!m_internal->base_set.empty()) {
+ auto it = m_internal->base_set.begin();
+ auto end_it = m_internal->base_set.end();
+ for (; it != end_it; it++) {
+ const VAddr inter_addr_end = it->upper();
+ const VAddr inter_addr = it->lower();
+ const size_t offset = inter_addr - m_vaddress;
+ const size_t sub_size = inter_addr_end - inter_addr;
+ m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
+ }
+ }
+ const IntervalType to_remove{start, start + size};
+ m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
+ m_internal->base_set.clear();
+}
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.h b/src/core/hle/service/nvdrv/core/heap_mapper.h
new file mode 100644
index 000000000..491a12e4f
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.h
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+#include "core/device_memory_manager.h"
+
+namespace Tegra::Host1x {
+class Host1x;
+} // namespace Tegra::Host1x
+
+namespace Service::Nvidia::NvCore {
+
+class HeapMapper {
+public:
+ HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
+ Tegra::Host1x::Host1x& host1x);
+ ~HeapMapper();
+
+ bool IsInBounds(VAddr start, size_t size) const {
+ VAddr end = start + size;
+ return start >= m_vaddress && end <= (m_vaddress + m_size);
+ }
+
+ DAddr Map(VAddr start, size_t size);
+
+ void Unmap(VAddr start, size_t size);
+
+ DAddr GetRegionStart() const {
+ return m_daddress;
+ }
+
+ size_t GetRegionSize() const {
+ return m_size;
+ }
+
+private:
+ struct HeapMapperInternal;
+ VAddr m_vaddress;
+ DAddr m_daddress;
+ size_t m_size;
+ Core::Asid m_asid;
+ std::unique_ptr<HeapMapperInternal> m_internal;
+};
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 0ca05257e..1b59c6b15 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -2,14 +2,19 @@
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later
+#include <functional>
+
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/heap_mapper.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"
using Core::Memory::YUZU_PAGESIZE;
+constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
@@ -17,9 +22,9 @@ NvMap::Handle::Handle(u64 size_, Id id_)
flags.raw = 0;
}
-NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
+NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+ NvCore::SessionId pSessionId) {
std::scoped_lock lock(mutex);
-
// Handles cannot be allocated twice
if (allocated) {
return NvResult::AccessDenied;
@@ -28,6 +33,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress)
flags = pFlags;
kind = pKind;
align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+ session_id = pSessionId;
// This flag is only applicable for handles with an address passed
if (pAddress) {
@@ -63,7 +69,7 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
return NvResult::Success;
}
-NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
+NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}, core{core_} {}
void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
std::scoped_lock lock(handles_lock);
@@ -78,12 +84,30 @@ void NvMap::UnmapHandle(Handle& handle_description) {
handle_description.unmap_queue_entry.reset();
}
+ // Free and unmap the handle from Host1x GMMU
+ if (handle_description.pin_virt_address) {
+ host1x.GMMU().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+ handle_description.aligned_size);
+ host1x.Allocator().Free(handle_description.pin_virt_address,
+ static_cast<u32>(handle_description.aligned_size));
+ handle_description.pin_virt_address = 0;
+ }
+
// Free and unmap the handle from the SMMU
- host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
- handle_description.aligned_size);
- host1x.Allocator().Free(handle_description.pin_virt_address,
- static_cast<u32>(handle_description.aligned_size));
- handle_description.pin_virt_address = 0;
+ const size_t map_size = handle_description.aligned_size;
+ if (!handle_description.in_heap) {
+ auto& smmu = host1x.MemoryManager();
+ size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+ smmu.Unmap(handle_description.d_address, map_size);
+ smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
+ handle_description.d_address = 0;
+ return;
+ }
+ const VAddr vaddress = handle_description.address;
+ auto* session = core.GetSession(handle_description.session_id);
+ session->mapper->Unmap(vaddress, map_size);
+ handle_description.d_address = 0;
+ handle_description.in_heap = false;
}
bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -124,22 +148,33 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
}
}
-VAddr NvMap::GetHandleAddress(Handle::Id handle) {
+DAddr NvMap::GetHandleAddress(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
- return handles.at(handle)->address;
+ return handles.at(handle)->d_address;
} catch (std::out_of_range&) {
return 0;
}
}
-u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
+DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
auto handle_description{GetHandle(handle)};
if (!handle_description) [[unlikely]] {
return 0;
}
std::scoped_lock lock(handle_description->mutex);
+ const auto map_low_area = [&] {
+ if (handle_description->pin_virt_address == 0) {
+ auto& gmmu_allocator = host1x.Allocator();
+ auto& gmmu = host1x.GMMU();
+ u32 address =
+ gmmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size));
+ gmmu.Map(static_cast<GPUVAddr>(address), handle_description->d_address,
+ handle_description->aligned_size);
+ handle_description->pin_virt_address = address;
+ }
+ };
if (!handle_description->pins) {
// If we're in the unmap queue we can just remove ourselves and return since we're already
// mapped
@@ -151,37 +186,58 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
unmap_queue.erase(*handle_description->unmap_queue_entry);
handle_description->unmap_queue_entry.reset();
+ if (low_area_pin) {
+ map_low_area();
+ handle_description->pins++;
+ return static_cast<DAddr>(handle_description->pin_virt_address);
+ }
+
handle_description->pins++;
- return handle_description->pin_virt_address;
+ return handle_description->d_address;
}
}
+ using namespace std::placeholders;
// If not then allocate some space and map it
- u32 address{};
- auto& smmu_allocator = host1x.Allocator();
- auto& smmu_memory_manager = host1x.MemoryManager();
- while ((address = smmu_allocator.Allocate(
- static_cast<u32>(handle_description->aligned_size))) == 0) {
- // Free handles until the allocation succeeds
- std::scoped_lock queueLock(unmap_queue_lock);
- if (auto freeHandleDesc{unmap_queue.front()}) {
- // Handles in the unmap queue are guaranteed not to be pinned so don't bother
- // checking if they are before unmapping
- std::scoped_lock freeLock(freeHandleDesc->mutex);
- if (handle_description->pin_virt_address)
- UnmapHandle(*freeHandleDesc);
- } else {
- LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+ DAddr address{};
+ auto& smmu = host1x.MemoryManager();
+ auto* session = core.GetSession(handle_description->session_id);
+ const VAddr vaddress = handle_description->address;
+ const size_t map_size = handle_description->aligned_size;
+ if (session->has_preallocated_area && session->mapper->IsInBounds(vaddress, map_size)) {
+ handle_description->d_address = session->mapper->Map(vaddress, map_size);
+ handle_description->in_heap = true;
+ } else {
+ size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+ while ((address = smmu.Allocate(aligned_up)) == 0) {
+ // Free handles until the allocation succeeds
+ std::scoped_lock queueLock(unmap_queue_lock);
+ if (auto freeHandleDesc{unmap_queue.front()}) {
+ // Handles in the unmap queue are guaranteed not to be pinned so don't bother
+ // checking if they are before unmapping
+ std::scoped_lock freeLock(freeHandleDesc->mutex);
+ if (handle_description->d_address)
+ UnmapHandle(*freeHandleDesc);
+ } else {
+ LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+ }
}
+
+ handle_description->d_address = address;
+ smmu.Map(address, vaddress, map_size, session->asid, true);
+ handle_description->in_heap = false;
}
+ }
- smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
- handle_description->aligned_size);
- handle_description->pin_virt_address = address;
+ if (low_area_pin) {
+ map_low_area();
}
handle_description->pins++;
- return handle_description->pin_virt_address;
+ if (low_area_pin) {
+ return static_cast<DAddr>(handle_description->pin_virt_address);
+ }
+ return handle_description->d_address;
}
void NvMap::UnpinHandle(Handle::Id handle) {
@@ -232,7 +288,7 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
} else if (handle_description->dupes == 0) {
// Force unmap the handle
- if (handle_description->pin_virt_address) {
+ if (handle_description->d_address) {
std::scoped_lock queueLock(unmap_queue_lock);
UnmapHandle(*handle_description);
}
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index a8e573890..d7f695845 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -14,6 +14,7 @@
#include "common/bit_field.h"
#include "common/common_types.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"
namespace Tegra {
@@ -25,6 +26,8 @@ class Host1x;
} // namespace Tegra
namespace Service::Nvidia::NvCore {
+
+class Container;
/**
* @brief The nvmap core class holds the global state for nvmap and provides methods to manage
* handles
@@ -48,7 +51,7 @@ public:
using Id = u32;
Id id; //!< A globally unique identifier for this handle
- s32 pins{};
+ s64 pins{};
u32 pin_virt_address{};
std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
@@ -61,15 +64,18 @@ public:
} flags{};
static_assert(sizeof(Flags) == sizeof(u32));
- u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
- //!< this can also be in the nvdrv tmem
+ VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+ //!< this can also be in the nvdrv tmem
bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
//!< call
u8 kind{}; //!< Used for memory compression
bool allocated{}; //!< If the handle has been allocated with `Alloc`
+ bool in_heap{};
+ NvCore::SessionId session_id{};
- u64 dma_map_addr{}; //! remove me after implementing pinning.
+ DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
+ //!< to, this can also be in the nvdrv tmem
Handle(u64 size, Id id);
@@ -77,7 +83,8 @@ public:
* @brief Sets up the handle with the given memory config, can allocate memory from the tmem
* if a 0 address is passed
*/
- [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
+ [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+ NvCore::SessionId pSessionId);
/**
* @brief Increases the dupe counter of the handle for the given session
@@ -108,7 +115,7 @@ public:
bool can_unlock; //!< If the address region is ready to be unlocked
};
- explicit NvMap(Tegra::Host1x::Host1x& host1x);
+ explicit NvMap(Container& core, Tegra::Host1x::Host1x& host1x);
/**
* @brief Creates an unallocated handle of the given size
@@ -117,7 +124,7 @@ public:
std::shared_ptr<Handle> GetHandle(Handle::Id handle);
- VAddr GetHandleAddress(Handle::Id handle);
+ DAddr GetHandleAddress(Handle::Id handle);
/**
* @brief Maps a handle into the SMMU address space
@@ -125,7 +132,7 @@ public:
* number of calls to `UnpinHandle`
* @return The SMMU virtual address that the handle has been mapped to
*/
- u32 PinHandle(Handle::Id handle);
+ DAddr PinHandle(Handle::Id handle, bool low_area_pin);
/**
* @brief When this has been called an equal number of times to `PinHandle` for the supplied
@@ -172,5 +179,7 @@ private:
* @return If the handle was removed from the map
*/
bool TryRemoveHandle(const Handle& handle_description);
+
+ Container& core;
};
} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index a04538d5d..8adaddc60 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -7,6 +7,7 @@
#include <vector>
#include "common/common_types.h"
+#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/nvdata.h"
namespace Core {
@@ -62,7 +63,7 @@ public:
* Called once a device is opened
* @param fd The device fd
*/
- virtual void OnOpen(DeviceFD fd) = 0;
+ virtual void OnOpen(NvCore::SessionId session_id, DeviceFD fd) = 0;
/**
* Called once a device is closed
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 05a43d8dc..c1ebbd62d 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -35,14 +35,14 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
return NvResult::NotImplemented;
}
-void nvdisp_disp0::OnOpen(DeviceFD fd) {}
+void nvdisp_disp0::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvdisp_disp0::OnClose(DeviceFD fd) {}
void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
u32 height, u32 stride, android::BufferTransformFlags transform,
const Common::Rectangle<int>& crop_rect,
std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
- const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
+ const DAddr addr = nvmap.GetHandleAddress(buffer_handle);
LOG_TRACE(Service,
"Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
addr, offset, width, height, stride, format);
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index daee05fe8..5f13a50a2 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -32,7 +32,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
/// Performs a screen flip, drawing the buffer pointed to by the handle.
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 6b3639008..e6646ba04 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
return NvResult::NotImplemented;
}
-void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_as_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_as_gpu::OnClose(DeviceFD fd) {}
NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
@@ -206,6 +206,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
static_cast<u32>(aligned_size >> page_size_bits));
}
+ nvmap.UnpinHandle(mapping->handle);
+
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
// Only FreeSpace can unmap them fully
if (mapping->sparse_alloc) {
@@ -293,12 +295,12 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
return NvResult::BadValue;
}
- VAddr cpu_address{static_cast<VAddr>(
- handle->address +
- (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
+ DAddr base = nvmap.PinHandle(entry.handle, false);
+ DAddr device_address{static_cast<DAddr>(
+ base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
- gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
- use_big_pages);
+ gmmu->Map(virtual_address, device_address, size,
+ static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);
}
}
@@ -331,9 +333,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
}
u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
- VAddr cpu_address{mapping->ptr + params.buffer_offset};
+ VAddr device_address{mapping->ptr + params.buffer_offset};
- gmmu->Map(gpu_address, cpu_address, params.mapping_size,
+ gmmu->Map(gpu_address, device_address, params.mapping_size,
static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
return NvResult::Success;
@@ -349,7 +351,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
return NvResult::BadValue;
}
- VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+ DAddr device_address{
+ static_cast<DAddr>(nvmap.PinHandle(params.handle, false) + params.buffer_offset)};
u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
bool big_page{[&]() {
@@ -373,15 +376,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
}
const bool use_big_pages = alloc->second.big_pages && big_page;
- gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+ gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind),
use_big_pages);
- auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
- use_big_pages, alloc->second.sparse)};
+ auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+ true, use_big_pages, alloc->second.sparse)};
alloc->second.mappings.push_back(mapping);
mapping_map[params.offset] = mapping;
} else {
-
auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
@@ -394,11 +396,11 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
return NvResult::InsufficientMemory;
}
- gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+ gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size),
static_cast<Tegra::PTEKind>(params.kind), big_page);
- auto mapping{
- std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+ auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+ false, big_page, false)};
mapping_map[params.offset] = mapping;
}
@@ -433,6 +435,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
gmmu->Unmap(params.offset, mapping->size);
}
+ nvmap.UnpinHandle(mapping->handle);
+
mapping_map.erase(params.offset);
} catch (const std::out_of_range&) {
LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 79a21683d..7d0a99988 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -55,7 +55,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
Kernel::KEvent* QueryEvent(u32 event_id) override;
@@ -159,16 +159,18 @@ private:
NvCore::NvMap& nvmap;
struct Mapping {
- VAddr ptr;
+ NvCore::NvMap::Handle::Id handle;
+ DAddr ptr;
u64 offset;
u64 size;
bool fixed;
bool big_page; // Only valid if fixed == false
bool sparse_alloc;
- Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
- : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
- sparse_alloc(sparse_alloc_) {}
+ Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_,
+ bool big_page_, bool sparse_alloc_)
+ : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_),
+ big_page(big_page_), sparse_alloc(sparse_alloc_) {}
};
struct Allocation {
@@ -212,9 +214,6 @@ private:
bool initialised{};
} vm;
std::shared_ptr<Tegra::MemoryManager> gmmu;
-
- // s32 channel{};
- // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index b8dd34e24..250d01de3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp
return NvResult::NotImplemented;
}
-void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_ctrl::OnClose(DeviceFD fd) {}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 992124b60..403f1a746 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -32,7 +32,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 3e0c96456..ddd85678b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
return NvResult::NotImplemented;
}
-void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}
NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index d170299bd..d2ab05b21 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -28,7 +28,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index b0395c2f0..bf12d69a5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
return NvResult::NotImplemented;
}
-void nvhost_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_gpu::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_gpu::OnClose(DeviceFD fd) {}
NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 88fd228ff..e34a978db 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -47,7 +47,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
Kernel::KEvent* QueryEvent(u32 event_id) override;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index f43914e1b..2c0ac2a46 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
case 0x7:
return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
case 0x9:
- return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
+ return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd);
case 0xa:
return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
default:
@@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
return NvResult::NotImplemented;
}
-void nvhost_nvdec::OnOpen(DeviceFD fd) {
+void nvhost_nvdec::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
LOG_INFO(Service_NVDRV, "NVDEC video stream started");
system.SetNVDECActive(true);
+ sessions[fd] = session_id;
}
void nvhost_nvdec::OnClose(DeviceFD fd) {
@@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) {
system.GPU().ClearCdmaInstance(iter->second);
}
system.SetNVDECActive(false);
+ auto it = sessions.find(fd);
+ if (it != sessions.end()) {
+ sessions.erase(it);
+ }
}
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index ad2233c49..627686757 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -20,7 +20,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
};
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 74c701b95..a0a7bfa40 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,6 +8,7 @@
#include "common/common_types.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/kernel/k_process.h"
#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -95,6 +96,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
auto& gpu = system.GPU();
+ auto* session = core.GetSession(sessions[fd]);
+
if (gpu.UseNvdec()) {
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
const SyncptIncr& syncpt_incr = syncpt_increments[i];
@@ -106,8 +109,8 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
- system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
- cmdlist.size() * sizeof(u32));
+ session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
+ cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
// Some games expect command_buffers to be written back
@@ -133,10 +136,12 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
return NvResult::Success;
}
-NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries,
+ DeviceFD fd) {
const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
for (size_t i = 0; i < num_entries; i++) {
- entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
+ DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, true);
+ entries[i].map_address = static_cast<u32>(pin_address);
}
return NvResult::Success;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 7ce748e18..900db81d2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -4,7 +4,9 @@
#pragma once
#include <deque>
+#include <unordered_map>
#include <vector>
+
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
@@ -111,7 +113,7 @@ protected:
NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
NvResult GetSyncpoint(IoctlGetSyncpoint& params);
NvResult GetWaitbase(IoctlGetWaitbase& params);
- NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+ NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd);
NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
NvResult SetSubmitTimeout(u32 timeout);
@@ -125,6 +127,7 @@ protected:
NvCore::NvMap& nvmap;
NvCore::ChannelType channel_type;
std::array<u32, MaxSyncPoints> device_syncpoints{};
+ std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
};
}; // namespace Devices
} // namespace Service::Nvidia
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
index 9e6b86458..f87d53f12 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp
@@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
return NvResult::NotImplemented;
}
-void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
+void nvhost_nvjpg::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {}
void nvhost_nvjpg::OnClose(DeviceFD fd) {}
NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
index 790c97f6a..def9c254d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h
@@ -22,7 +22,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
private:
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index 87f8d7c22..bf090f5eb 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
case 0x3:
return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
case 0x9:
- return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
+ return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd);
case 0xa:
return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
default:
@@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
return NvResult::NotImplemented;
}
-void nvhost_vic::OnOpen(DeviceFD fd) {}
+void nvhost_vic::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
+ sessions[fd] = session_id;
+}
void nvhost_vic::OnClose(DeviceFD fd) {
auto& host1x_file = core.Host1xDeviceFile();
@@ -76,6 +78,7 @@ void nvhost_vic::OnClose(DeviceFD fd) {
if (iter != host1x_file.fd_to_id.end()) {
system.GPU().ClearCdmaInstance(iter->second);
}
+ sessions.erase(fd);
}
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index cadbcb0a5..0cc04354a 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -19,7 +19,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 71b2e62ec..da61a3bfe 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
case 0x3:
return WrapFixed(this, &nvmap::IocFromId, input, output);
case 0x4:
- return WrapFixed(this, &nvmap::IocAlloc, input, output);
+ return WrapFixed(this, &nvmap::IocAlloc, input, output, fd);
case 0x5:
- return WrapFixed(this, &nvmap::IocFree, input, output);
+ return WrapFixed(this, &nvmap::IocFree, input, output, fd);
case 0x9:
return WrapFixed(this, &nvmap::IocParam, input, output);
case 0xe:
@@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
return NvResult::NotImplemented;
}
-void nvmap::OnOpen(DeviceFD fd) {}
-void nvmap::OnClose(DeviceFD fd) {}
+void nvmap::OnOpen(NvCore::SessionId session_id, DeviceFD fd) {
+ sessions[fd] = session_id;
+}
+void nvmap::OnClose(DeviceFD fd) {
+ auto it = sessions.find(fd);
+ if (it != sessions.end()) {
+ sessions.erase(it);
+ }
+}
NvResult nvmap::IocCreate(IocCreateParams& params) {
LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
@@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
return NvResult::Success;
}
-NvResult nvmap::IocAlloc(IocAllocParams& params) {
+NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
if (!params.handle) {
@@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) {
return NvResult::InsufficientMemory;
}
- const auto result =
- handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+ const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
+ params.address, sessions[fd]);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
}
bool is_out_io{};
- ASSERT(system.ApplicationProcess()
- ->GetPageTable()
+ auto process = container.GetSession(sessions[fd])->process;
+ ASSERT(process->GetPageTable()
.LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
@@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
return NvResult::Success;
}
-NvResult nvmap::IocFree(IocFreeParams& params) {
+NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called");
if (!params.handle) {
@@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) {
}
if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+ auto process = container.GetSession(sessions[fd])->process;
if (freeInfo->can_unlock) {
- ASSERT(system.ApplicationProcess()
- ->GetPageTable()
+ ASSERT(process->GetPageTable()
.UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
.IsSuccess());
}
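The nvmap hunks above stop assuming every allocation belongs to system.ApplicationProcess(); IocAlloc and IocFree now look up the process attached to the caller's session before locking or unlocking its page table. The sketch below shows that indirection only; Process, Session and Container here are toy stand-ins for the Kernel and NvCore classes, and the lock/unlock calls merely print instead of touching a KPageTable.

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

struct Process {
    const char* name;
    void LockRegion(std::uint64_t addr, std::uint64_t size) {
        std::printf("%s: lock   0x%llx (+0x%llx)\n", name,
                    (unsigned long long)addr, (unsigned long long)size);
    }
    void UnlockRegion(std::uint64_t addr, std::uint64_t size) {
        std::printf("%s: unlock 0x%llx (+0x%llx)\n", name,
                    (unsigned long long)addr, (unsigned long long)size);
    }
};

struct Session { Process* process; };

struct Container {
    std::vector<Session> sessions;
    std::size_t OpenSession(Process* p) { sessions.push_back({p}); return sessions.size() - 1; }
    Session* GetSession(std::size_t id) { return &sessions[id]; }
};

int main() {
    Process app{"application"};
    Container container;
    std::unordered_map<int, std::size_t> fd_to_session; // per-device bookkeeping, as in OnOpen

    fd_to_session[3] = container.OpenSession(&app);

    // IocAlloc-like path: resolve the process from the caller's session, then lock.
    Process* owner = container.GetSession(fd_to_session[3])->process;
    owner->LockRegion(0x1000, 0x2000);

    // IocFree-like path: same lookup before unlocking.
    owner = container.GetSession(fd_to_session[3])->process;
    owner->UnlockRegion(0x1000, 0x2000);
}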
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index 049c11028..d07d85f88 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -33,7 +33,7 @@ public:
NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
std::span<u8> inline_output) override;
- void OnOpen(DeviceFD fd) override;
+ void OnOpen(NvCore::SessionId session_id, DeviceFD fd) override;
void OnClose(DeviceFD fd) override;
enum class HandleParameterType : u32_le {
@@ -100,11 +100,11 @@ public:
static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
NvResult IocCreate(IocCreateParams& params);
- NvResult IocAlloc(IocAllocParams& params);
+ NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
NvResult IocGetId(IocGetIdParams& params);
NvResult IocFromId(IocFromIdParams& params);
NvResult IocParam(IocParamParams& params);
- NvResult IocFree(IocFreeParams& params);
+ NvResult IocFree(IocFreeParams& params, DeviceFD fd);
private:
/// Id to use for the next handle that is created.
@@ -115,6 +115,7 @@ private:
NvCore::Container& container;
NvCore::NvMap& file;
+ std::unordered_map<DeviceFD, NvCore::SessionId> sessions;
};
} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 9e46ee8dd..cb256e5b4 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
auto server_manager = std::make_unique<ServerManager>(system);
auto module = std::make_shared<Module>(system);
- server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
- server_manager->RegisterNamedService("nvdrv:a",
- std::make_shared<NVDRV>(system, module, "nvdrv:a"));
- server_manager->RegisterNamedService("nvdrv:s",
- std::make_shared<NVDRV>(system, module, "nvdrv:s"));
- server_manager->RegisterNamedService("nvdrv:t",
- std::make_shared<NVDRV>(system, module, "nvdrv:t"));
+ const auto NvdrvInterfaceFactoryForApplication = [&, module] {
+ return std::make_shared<NVDRV>(system, module, "nvdrv");
+ };
+ const auto NvdrvInterfaceFactoryForApplets = [&, module] {
+ return std::make_shared<NVDRV>(system, module, "nvdrv:a");
+ };
+ const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
+ return std::make_shared<NVDRV>(system, module, "nvdrv:s");
+ };
+ const auto NvdrvInterfaceFactoryForTesting = [&, module] {
+ return std::make_shared<NVDRV>(system, module, "nvdrv:t");
+ };
+ server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
+ server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
+ server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
+ server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactoryForTesting);
server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
nvnflinger.SetNVDrvInstance(module);
ServerManager::RunServer(std::move(server_manager));
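The hunk above switches RegisterNamedService from taking an already constructed NVDRV instance to taking a factory, so each named endpoint ("nvdrv", "nvdrv:a", "nvdrv:s", "nvdrv:t") can produce a fresh interface while sharing the same Module. Below is a reduced sketch of that registration shape; the ServerManager, Module and Interface types are toys written for this example and the real RegisterNamedService signature is not reproduced here.

#include <cstdio>
#include <functional>
#include <map>
#include <memory>
#include <string>

struct Module {};                 // shared backend, analogous to Nvidia::Module
struct Interface {                // analogous to NVDRV
    Interface(std::shared_ptr<Module> m, std::string n) : module(std::move(m)), name(std::move(n)) {}
    std::shared_ptr<Module> module;
    std::string name;
};

class ServerManager {
public:
    using Factory = std::function<std::shared_ptr<Interface>()>;
    void RegisterNamedService(std::string name, Factory f) { factories[std::move(name)] = std::move(f); }
    std::shared_ptr<Interface> Connect(const std::string& name) { return factories.at(name)(); }
private:
    std::map<std::string, Factory> factories;
};

int main() {
    auto module = std::make_shared<Module>();
    ServerManager server;
    // Capture the shared module by value so every new connection reuses it.
    server.RegisterNamedService("nvdrv", [module] { return std::make_shared<Interface>(module, "nvdrv"); });
    server.RegisterNamedService("nvdrv:a", [module] { return std::make_shared<Interface>(module, "nvdrv:a"); });

    auto a = server.Connect("nvdrv");
    auto b = server.Connect("nvdrv");
    std::printf("distinct interfaces: %d, shared module: %d\n",
                (int)(a != b), (int)(a->module == b->module));
}

The point of the factory form is that the manager decides when an interface object is created, while the captured module keeps all interfaces backed by one driver state.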
@@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
return NvResult::Success;
}
-DeviceFD Module::Open(const std::string& device_name) {
+DeviceFD Module::Open(const std::string& device_name, NvCore::SessionId session_id) {
auto it = builders.find(device_name);
if (it == builders.end()) {
LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
@@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) {
auto& builder = it->second;
auto device = builder(fd)->second;
- device->OnOpen(fd);
+ device->OnOpen(session_id, fd);
return fd;
}
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index d8622b3ca..c594f0e5e 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -77,7 +77,7 @@ public:
NvResult VerifyFD(DeviceFD fd) const;
/// Opens a device node and returns a file descriptor to it.
- DeviceFD Open(const std::string& device_name);
+ DeviceFD Open(const std::string& device_name, NvCore::SessionId session_id);
/// Sends an ioctl command to the specified file descriptor.
NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);
@@ -93,6 +93,10 @@ public:
NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
+ NvCore::Container& GetContainer() {
+ return container;
+ }
+
private:
friend class EventInterface;
friend class Service::Nvnflinger::Nvnflinger;
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index c8a880e84..6e4825313 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -3,8 +3,10 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "common/logging/log.h"
+#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/service/ipc_helpers.h"
#include "core/hle/service/nvdrv/nvdata.h"
@@ -37,7 +39,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
return;
}
- DeviceFD fd = nvdrv->Open(device_name);
+ DeviceFD fd = nvdrv->Open(device_name, session_id);
rb.Push<DeviceFD>(fd);
rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);
@@ -150,12 +152,29 @@ void NVDRV::Close(HLERequestContext& ctx) {
void NVDRV::Initialize(HLERequestContext& ctx) {
LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+ IPC::ResponseBuilder rb{ctx, 3};
+ SCOPE_EXIT({
+ rb.Push(ResultSuccess);
+ rb.PushEnum(NvResult::Success);
+ });
- is_initialized = true;
+ if (is_initialized) {
+ // No need to initialize again
+ return;
+ }
- IPC::ResponseBuilder rb{ctx, 3};
- rb.Push(ResultSuccess);
- rb.PushEnum(NvResult::Success);
+ IPC::RequestParser rp{ctx};
+ const auto process_handle{ctx.GetCopyHandle(0)};
+ // The transfer memory is lent to nvdrv as a work buffer, since nvdrv is
+ // unable to allocate as much memory on its own. For HLE it is unnecessary to handle it.
+ [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)};
+ [[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();
+
+ auto& container = nvdrv->GetContainer();
+ auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
+ session_id = container.OpenSession(process.GetPointerUnsafe());
+
+ is_initialized = true;
}
void NVDRV::QueryEvent(HLERequestContext& ctx) {
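The rewritten Initialize above pulls the client's process handle out of the request, asks the container for a SessionId bound to that process, and only then marks the interface as initialized; the SCOPE_EXIT keeps the response identical on every path, including the early return. The sketch below mirrors that control flow with stand-in request and container types; the ScopeExit helper here is a hand-rolled substitute for yuzu's SCOPE_EXIT macro, and everything else is simplified.

#include <cstdint>
#include <cstdio>
#include <utility>

// Minimal scope guard standing in for SCOPE_EXIT.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F fn) : f(std::move(fn)) {}
    ~ScopeExit() { f(); }
private:
    F f;
};

struct Process {};
struct Container {
    std::uint64_t next = 1;
    std::uint64_t OpenSession(Process*) { return next++; }
};

struct InitializeState {
    bool is_initialized = false;
    std::uint64_t session_id = 0;
};

// Mirrors the shape of NVDRV::Initialize: respond the same way on every path,
// skip the work when already initialized, otherwise open a session for the
// process named by the first copy handle. The transfer memory is ignored.
void Initialize(InitializeState& state, Container& container, Process* client_process) {
    ScopeExit respond{[] { std::printf("reply: ResultSuccess / NvResult::Success\n"); }};

    if (state.is_initialized) {
        return; // No need to initialize again.
    }

    state.session_id = container.OpenSession(client_process);
    state.is_initialized = true;
}

int main() {
    Container container;
    InitializeState state;
    Process app;
    Initialize(state, container, &app);  // opens a session
    Initialize(state, container, &app);  // second call is a no-op apart from the reply
    std::printf("session_id = %llu\n", (unsigned long long)state.session_id);
}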
@@ -242,6 +261,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char*
RegisterHandlers(functions);
}
-NVDRV::~NVDRV() = default;
+NVDRV::~NVDRV() {
+ auto& container = nvdrv->GetContainer();
+ container.CloseSession(session_id);
+}
} // namespace Service::Nvidia
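The destructor change above releases the SessionId that Initialize opened, pairing OpenSession with CloseSession over the interface's lifetime. As a hedged aside, the same pairing could also be expressed as a small RAII guard; the guard below is purely illustrative and is not how yuzu structures it.

#include <cstdio>

struct Container {
    int OpenSession() { std::puts("OpenSession"); return 7; }
    void CloseSession(int) { std::puts("CloseSession"); }
};

// Illustrative guard: open on construction, close on destruction,
// mirroring the Initialize/~NVDRV pairing in the diff.
class SessionGuard {
public:
    explicit SessionGuard(Container& c) : container(c), id(c.OpenSession()) {}
    ~SessionGuard() { container.CloseSession(id); }
    SessionGuard(const SessionGuard&) = delete;
    SessionGuard& operator=(const SessionGuard&) = delete;
    int Id() const { return id; }
private:
    Container& container;
    int id;
};

int main() {
    Container container;
    SessionGuard session{container};
    std::printf("session %d\n", session.Id());
}   // CloseSession runs here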
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index 6e98115dc..f2195ae1e 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -35,6 +35,7 @@ private:
u64 pid{};
bool is_initialized{};
+ NvCore::SessionId session_id{};
Common::ScratchBuffer<u8> output_buffer;
Common::ScratchBuffer<u8> inline_output_buffer;
};
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 2fef6cc1a..86e272b41 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -87,19 +87,20 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
R_SUCCEED();
}
-Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
+Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) {
// Free the handle.
Nvidia::Devices::nvmap::IocFreeParams free_params{
.handle = handle,
};
- R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
+ VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
}
Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
- u32 size) {
+ u32 size, Nvidia::DeviceFD nvmap_fd) {
// Assign the allocated memory to the handle.
Nvidia::Devices::nvmap::IocAllocParams alloc_params{
.handle = handle,
@@ -109,16 +110,16 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
.kind = 0,
.address = GetInteger(buffer),
};
- R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+ R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
+ VI::ResultOperationFailed);
// We succeeded.
R_SUCCEED();
}
-Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
+Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd,
Common::ProcessAddress buffer, u32 size) {
// Get the nvmap device.
- auto nvmap_fd = nvdrv.Open("/dev/nvmap");
auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
ASSERT(nvmap != nullptr);
@@ -127,11 +128,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
// Ensure we maintain a clean state on failure.
ON_RESULT_FAILURE {
- ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle)));
+ ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd)));
};
// Assign the allocated memory to the handle.
- R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size));
+ R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
}
constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;
@@ -197,9 +198,13 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
std::addressof(m_buffer_page_group), m_system,
SharedBufferSize));
+ auto& container = m_nvdrv->GetContainer();
+ m_session_id = container.OpenSession(m_system.ApplicationProcess());
+ m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
+
// Create an nvmap handle for the buffer and assign the memory to it.
- R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address,
- SharedBufferSize));
+ R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
+ map_address, SharedBufferSize));
// Record the display id.
m_display_id = display_id;
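Instead of opening /dev/nvmap inside AllocateHandleForBuffer, the hunks above have FbShareBufferManager open one session for the application process and one nvmap descriptor during Initialize, then thread that descriptor through AllocateHandleForBuffer, AllocNvMapHandle and FreeNvMapHandle. The sketch below shows only that threading; NvMapDevice and its Create/Alloc/Free methods are simplified placeholders rather than the real IocCreate/IocAlloc/IocFree API.

#include <cstdint>
#include <cstdio>

using DeviceFD = std::int32_t;

struct NvMapDevice {
    // Stand-ins for the nvmap ioctls; the fd identifies the caller's session.
    std::uint32_t Create(std::uint32_t size) { std::printf("create %u bytes\n", (unsigned)size); return 42; }
    bool Alloc(std::uint32_t handle, std::uint64_t addr, DeviceFD fd) {
        std::printf("alloc handle %u at 0x%llx via fd %d\n",
                    (unsigned)handle, (unsigned long long)addr, (int)fd);
        return true;
    }
    void Free(std::uint32_t handle, DeviceFD fd) {
        std::printf("free handle %u via fd %d\n", (unsigned)handle, (int)fd);
    }
};

// Mirrors AllocateHandleForBuffer: every helper that touches the handle gets
// the already-opened nvmap descriptor instead of opening its own.
bool AllocateHandleForBuffer(std::uint32_t* out_handle, NvMapDevice& nvmap, DeviceFD nvmap_fd,
                             std::uint64_t buffer, std::uint32_t size) {
    *out_handle = nvmap.Create(size);
    if (!nvmap.Alloc(*out_handle, buffer, nvmap_fd)) {
        nvmap.Free(*out_handle, nvmap_fd); // keep a clean state on failure
        return false;
    }
    return true;
}

int main() {
    NvMapDevice nvmap;
    DeviceFD nvmap_fd = 3;          // opened once up front, e.g. during Initialize
    std::uint32_t handle = 0;
    AllocateHandleForBuffer(&handle, nvmap, nvmap_fd, 0x8000000, 0x1000);
}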
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
index c809c01b4..033bf4bbe 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
@@ -4,6 +4,8 @@
#pragma once
#include "common/math_util.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvnflinger/nvnflinger.h"
#include "core/hle/service/nvnflinger/ui/fence.h"
@@ -53,7 +55,8 @@ private:
u64 m_layer_id = 0;
u32 m_buffer_nvmap_handle = 0;
SharedMemoryPoolLayout m_pool_layout = {};
-
+ Nvidia::DeviceFD m_nvmap_fd = {};
+ Nvidia::NvCore::SessionId m_session_id = {};
std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;
std::mutex m_guard;
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp
index af6591370..71d6fdb0c 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.cpp
+++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp
@@ -124,7 +124,7 @@ void Nvnflinger::ShutdownLayers() {
void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
nvdrv = std::move(instance);
- disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
+ disp_fd = nvdrv->Open("/dev/nvdisp_disp0", {});
}
std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) {
diff --git a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
index ce70946ec..ede2a1193 100644
--- a/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
+++ b/src/core/hle/service/nvnflinger/ui/graphic_buffer.cpp
@@ -22,11 +22,13 @@ GraphicBuffer::GraphicBuffer(Service::Nvidia::NvCore::NvMap& nvmap,
: NvGraphicBuffer(GetBuffer(buffer)), m_nvmap(std::addressof(nvmap)) {
if (this->BufferId() > 0) {
m_nvmap->DuplicateHandle(this->BufferId(), true);
+ m_nvmap->PinHandle(this->BufferId(), false);
}
}
GraphicBuffer::~GraphicBuffer() {
if (m_nvmap != nullptr && this->BufferId() > 0) {
+ m_nvmap->UnpinHandle(this->BufferId());
m_nvmap->FreeHandle(this->BufferId(), true);
}
}
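The GraphicBuffer change above pins the nvmap handle for the lifetime of the wrapper and unpins it in the destructor, keeping the new pin/unpin pair balanced alongside the existing duplicate/free pair. Below is a small counter-based sketch of that balancing; the counters are illustrative only, while the real PinHandle/UnpinHandle manage device mappings inside NvMap.

#include <cstdio>

// Illustrative handle bookkeeping: duplication and pinning are tracked
// separately, and both are released in reverse order on destruction.
struct Handle {
    int refs = 1;
    int pins = 0;
};

struct GraphicBuffer {
    explicit GraphicBuffer(Handle& h) : handle(&h) {
        ++handle->refs;  // analogous to DuplicateHandle(id, true)
        ++handle->pins;  // analogous to PinHandle(id, false)
    }
    ~GraphicBuffer() {
        --handle->pins;  // analogous to UnpinHandle(id)
        --handle->refs;  // analogous to FreeHandle(id, true)
    }
    Handle* handle;
};

int main() {
    Handle h;
    {
        GraphicBuffer buffer{h};
        std::printf("while alive: refs=%d pins=%d\n", h.refs, h.pins);
    }
    std::printf("after destruction: refs=%d pins=%d\n", h.refs, h.pins);
}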