Diffstat (limited to 'src/core/hle/service/nvdrv')
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp                   64
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h                     19
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp          2
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp        57
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h          20
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp   8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp                 4
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.cpp               6
8 files changed, 79 insertions, 101 deletions
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index fd6c9aa0c..7879c6f04 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -2,6 +2,8 @@
// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later
+#include <functional>
+
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
@@ -18,6 +20,7 @@ NvMap::Handle::Handle(u64 size_, Id id_)
}
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
+ std::scoped_lock lock(mutex);
// Handles cannot be allocated twice
if (allocated) {
return NvResult::AccessDenied;
@@ -78,11 +81,9 @@ void NvMap::UnmapHandle(Handle& handle_description) {
// Free and unmap the handle from the SMMU
auto& smmu = host1x.MemoryManager();
- smmu.Unmap(static_cast<DAddr>(handle_description.pin_virt_address),
- handle_description.aligned_size);
- smmu.Free(handle_description.pin_virt_address,
- static_cast<size_t>(handle_description.aligned_size));
- handle_description.pin_virt_address = 0;
+ smmu.Unmap(handle_description.d_address, handle_description.aligned_size);
+ smmu.Free(handle_description.d_address, static_cast<size_t>(handle_description.aligned_size));
+ handle_description.d_address = 0;
}
bool NvMap::TryRemoveHandle(const Handle& handle_description) {
@@ -123,41 +124,16 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
}
}
-VAddr NvMap::GetHandleAddress(Handle::Id handle) {
+DAddr NvMap::GetHandleAddress(Handle::Id handle) {
std::scoped_lock lock(handles_lock);
try {
- return handles.at(handle)->address;
+ return handles.at(handle)->d_address;
} catch (std::out_of_range&) {
return 0;
}
}
-NvResult NvMap::AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id) {
- auto handle_description{GetHandle(handle)};
- if (!handle_description) [[unlikely]] {
- return NvResult::BadParameter;
- }
-
- if (handle_description->allocated) [[unlikely]] {
- return NvResult::InsufficientMemory;
- }
-
- std::scoped_lock lock(handle_description->mutex);
- NvResult result = handle_description->Alloc(pFlags, pAlign, pKind, pAddress);
- if (result != NvResult::Success) {
- return result;
- }
- auto& smmu = host1x.MemoryManager();
- size_t total_size = static_cast<size_t>(handle_description->aligned_size);
- handle_description->d_address = smmu.Allocate(total_size);
- if (handle_description->d_address == 0) {
- return NvResult::InsufficientMemory;
- }
- smmu.Map(handle_description->d_address, handle_description->address, total_size, session_id);
- return NvResult::Success;
-}
-
-u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) {
+DAddr NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id, bool low_area_pin) {
auto handle_description{GetHandle(handle)};
if (!handle_description) [[unlikely]] {
return 0;
@@ -176,35 +152,38 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) {
handle_description->unmap_queue_entry.reset();
handle_description->pins++;
- return handle_description->pin_virt_address;
+ return handle_description->d_address;
}
}
+ using namespace std::placeholders;
// If not then allocate some space and map it
DAddr address{};
auto& smmu = host1x.MemoryManager();
- while ((address = smmu.AllocatePinned(
- static_cast<size_t>(handle_description->aligned_size))) == 0) {
+ auto allocate = std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1);
+ //: std::bind(&Tegra::MaxwellDeviceMemoryManager::Allocate, &smmu, _1);
+ while ((address = allocate(static_cast<size_t>(handle_description->aligned_size))) == 0) {
// Free handles until the allocation succeeds
std::scoped_lock queueLock(unmap_queue_lock);
if (auto freeHandleDesc{unmap_queue.front()}) {
// Handles in the unmap queue are guaranteed not to be pinned so don't bother
// checking if they are before unmapping
std::scoped_lock freeLock(freeHandleDesc->mutex);
- if (handle_description->pin_virt_address)
+ if (handle_description->d_address)
UnmapHandle(*freeHandleDesc);
} else {
LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
}
}
+ handle_description->d_address = address;
+
smmu.Map(address, handle_description->address, handle_description->aligned_size,
session_id);
- handle_description->pin_virt_address = static_cast<u32>(address);
}
handle_description->pins++;
- return handle_description->pin_virt_address;
+ return handle_description->d_address;
}
void NvMap::UnpinHandle(Handle::Id handle) {
@@ -255,15 +234,10 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
} else if (handle_description->dupes == 0) {
// Force unmap the handle
- if (handle_description->pin_virt_address) {
+ if (handle_description->d_address) {
std::scoped_lock queueLock(unmap_queue_lock);
UnmapHandle(*handle_description);
}
- if (handle_description->allocated) {
- auto& smmu = host1x.MemoryManager();
- smmu.Free(handle_description->d_address, handle_description->aligned_size);
- smmu.Unmap(handle_description->d_address, handle_description->aligned_size);
- }
handle_description->pins = 0;
}
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 7c3110d91..e9e9e8b5b 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -48,7 +48,7 @@ public:
using Id = u32;
Id id; //!< A globally unique identifier for this handle
- s32 pins{};
+ s64 pins{};
u32 pin_virt_address{};
std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
@@ -63,15 +63,14 @@ public:
VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
//!< this can also be in the nvdrv tmem
- DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
- //!< this can also be in the nvdrv tmem
bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
//!< call
u8 kind{}; //!< Used for memory compression
bool allocated{}; //!< If the handle has been allocated with `Alloc`
- u64 dma_map_addr{}; //! remove me after implementing pinning.
+ DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
+ //!< this can also be in the nvdrv tmem
Handle(u64 size, Id id);
@@ -119,15 +118,7 @@ public:
std::shared_ptr<Handle> GetHandle(Handle::Id handle);
- VAddr GetHandleAddress(Handle::Id handle);
-
- /**
- * @brief Maps a handle into the SMMU address space
- * @note This operation is refcounted, the number of calls to this must eventually match the
- * number of calls to `UnpinHandle`
- * @return The SMMU virtual address that the handle has been mapped to
- */
- u32 PinHandle(Handle::Id handle, size_t session_id);
+ DAddr GetHandleAddress(Handle::Id handle);
/**
* @brief Maps a handle into the SMMU address space
@@ -135,7 +126,7 @@ public:
* number of calls to `UnpinHandle`
* @return The SMMU virtual address that the handle has been mapped to
*/
- NvResult AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id);
+ DAddr PinHandle(Handle::Id handle, size_t session_id, bool low_area_pin);
/**
* @brief When this has been called an equal number of times to `PinHandle` for the supplied
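The header diff is mostly a type migration: Handle now carries both the guest CPU address and the device address it was pinned to. A small sketch of the resulting layout; note that in yuzu both VAddr and DAddr are plain 64-bit typedefs, so the distinct names document intent rather than enforce it at compile time.

    #include <cstdint>

    using VAddr = std::uint64_t; // guest CPU address space
    using DAddr = std::uint64_t; // device (SMMU) address space

    struct HandleAddresses {
        VAddr address{};   // where the guest process mapped the buffer
        DAddr d_address{}; // where the SMMU mapped it for the device (replaces
                           // the old u32 pin_virt_address, which could truncate)
    };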
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 0ff41c6b2..f1404b9da 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -42,7 +42,7 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
u32 height, u32 stride, android::BufferTransformFlags transform,
const Common::Rectangle<int>& crop_rect,
std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
- const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
+ const DAddr addr = nvmap.GetHandleAddress(buffer_handle);
LOG_TRACE(Service,
"Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
addr, offset, width, height, stride, format);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index c92a7b2f6..8bc10eac2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -40,15 +40,15 @@ NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> i
case 0x3:
return WrapFixed(this, &nvhost_as_gpu::FreeSpace, input, output);
case 0x5:
- return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output);
+ return WrapFixed(this, &nvhost_as_gpu::UnmapBuffer, input, output, fd);
case 0x6:
- return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output);
+ return WrapFixed(this, &nvhost_as_gpu::MapBufferEx, input, output, fd);
case 0x8:
return WrapFixed(this, &nvhost_as_gpu::GetVARegions1, input, output);
case 0x9:
return WrapFixed(this, &nvhost_as_gpu::AllocAsEx, input, output);
case 0x14:
- return WrapVariable(this, &nvhost_as_gpu::Remap, input, output);
+ return WrapVariable(this, &nvhost_as_gpu::Remap, input, output, fd);
default:
break;
}
@@ -86,8 +86,15 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
return NvResult::NotImplemented;
}
-void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
-void nvhost_as_gpu::OnClose(DeviceFD fd) {}
+void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {
+ sessions[fd] = session_id;
+}
+void nvhost_as_gpu::OnClose(DeviceFD fd) {
+ auto it = sessions.find(fd);
+ if (it != sessions.end()) {
+ sessions.erase(it);
+ }
+}
NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {
LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
@@ -206,6 +213,8 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
static_cast<u32>(aligned_size >> page_size_bits));
}
+ nvmap.UnpinHandle(mapping->handle);
+
// Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
// Only FreeSpace can unmap them fully
if (mapping->sparse_alloc) {
@@ -259,7 +268,7 @@ NvResult nvhost_as_gpu::FreeSpace(IoctlFreeSpace& params) {
return NvResult::Success;
}
-NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
+NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", entries.size());
if (!vm.initialised) {
@@ -293,19 +302,19 @@ NvResult nvhost_as_gpu::Remap(std::span<IoctlRemapEntry> entries) {
return NvResult::BadValue;
}
- VAddr cpu_address{static_cast<VAddr>(
- handle->address +
- (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
+ DAddr base = nvmap.PinHandle(entry.handle, sessions[fd], false);
+ DAddr device_address{static_cast<DAddr>(
+ base + (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
- gmmu->Map(virtual_address, cpu_address, size, static_cast<Tegra::PTEKind>(entry.kind),
- use_big_pages);
+ gmmu->Map(virtual_address, device_address, size,
+ static_cast<Tegra::PTEKind>(entry.kind), use_big_pages);
}
}
return NvResult::Success;
}
-NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
+NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV,
"called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
", offset={}",
@@ -331,9 +340,9 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
}
u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
- VAddr cpu_address{mapping->ptr + params.buffer_offset};
+ VAddr device_address{mapping->ptr + params.buffer_offset};
- gmmu->Map(gpu_address, cpu_address, params.mapping_size,
+ gmmu->Map(gpu_address, device_address, params.mapping_size,
static_cast<Tegra::PTEKind>(params.kind), mapping->big_page);
return NvResult::Success;
@@ -349,7 +358,8 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
return NvResult::BadValue;
}
- VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+ DAddr device_address{static_cast<DAddr>(nvmap.PinHandle(params.handle, sessions[fd], false) +
+ params.buffer_offset)};
u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
bool big_page{[&]() {
@@ -373,15 +383,14 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
}
const bool use_big_pages = alloc->second.big_pages && big_page;
- gmmu->Map(params.offset, cpu_address, size, static_cast<Tegra::PTEKind>(params.kind),
+ gmmu->Map(params.offset, device_address, size, static_cast<Tegra::PTEKind>(params.kind),
use_big_pages);
- auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
- use_big_pages, alloc->second.sparse)};
+ auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+ true, use_big_pages, alloc->second.sparse)};
alloc->second.mappings.push_back(mapping);
mapping_map[params.offset] = mapping;
} else {
-
auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
@@ -394,18 +403,18 @@ NvResult nvhost_as_gpu::MapBufferEx(IoctlMapBufferEx& params) {
return NvResult::InsufficientMemory;
}
- gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size),
+ gmmu->Map(params.offset, device_address, Common::AlignUp(size, page_size),
static_cast<Tegra::PTEKind>(params.kind), big_page);
- auto mapping{
- std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+ auto mapping{std::make_shared<Mapping>(params.handle, device_address, params.offset, size,
+ false, big_page, false)};
mapping_map[params.offset] = mapping;
}
return NvResult::Success;
}
-NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
+NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd) {
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
std::scoped_lock lock(mutex);
@@ -433,6 +442,8 @@ NvResult nvhost_as_gpu::UnmapBuffer(IoctlUnmapBuffer& params) {
gmmu->Unmap(params.offset, mapping->size);
}
+ nvmap.UnpinHandle(mapping->handle);
+
mapping_map.erase(params.offset);
} catch (const std::out_of_range&) {
LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
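The DeviceFD parameter threaded through Remap, MapBufferEx, and UnmapBuffer lets each handler recover which session opened the device, so handles are pinned against the right guest process. A condensed sketch of that bookkeeping, with simplified stand-in types (AsGpuSessions and SessionFor are illustrative names, not yuzu's):

    #include <cstdint>
    #include <unordered_map>

    using DeviceFD = std::int32_t;

    struct AsGpuSessions {
        std::unordered_map<DeviceFD, std::size_t> sessions;

        void OnOpen(std::size_t session_id, DeviceFD fd) { sessions[fd] = session_id; }
        void OnClose(DeviceFD fd) { sessions.erase(fd); } // find+erase in the diff

        // MapBufferEx/Remap use this to call nvmap.PinHandle(handle, session, false)
        std::size_t SessionFor(DeviceFD fd) { return sessions[fd]; }
    };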
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 0dd279f88..4b28f5078 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -141,9 +141,9 @@ private:
NvResult AllocAsEx(IoctlAllocAsEx& params);
NvResult AllocateSpace(IoctlAllocSpace& params);
- NvResult Remap(std::span<IoctlRemapEntry> params);
- NvResult MapBufferEx(IoctlMapBufferEx& params);
- NvResult UnmapBuffer(IoctlUnmapBuffer& params);
+ NvResult Remap(std::span<IoctlRemapEntry> params, DeviceFD fd);
+ NvResult MapBufferEx(IoctlMapBufferEx& params, DeviceFD fd);
+ NvResult UnmapBuffer(IoctlUnmapBuffer& params, DeviceFD fd);
NvResult FreeSpace(IoctlFreeSpace& params);
NvResult BindChannel(IoctlBindChannel& params);
@@ -159,16 +159,18 @@ private:
NvCore::NvMap& nvmap;
struct Mapping {
- VAddr ptr;
+ NvCore::NvMap::Handle::Id handle;
+ DAddr ptr;
u64 offset;
u64 size;
bool fixed;
bool big_page; // Only valid if fixed == false
bool sparse_alloc;
- Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
- : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
- sparse_alloc(sparse_alloc_) {}
+ Mapping(NvCore::NvMap::Handle::Id handle_, DAddr ptr_, u64 offset_, u64 size_, bool fixed_,
+ bool big_page_, bool sparse_alloc_)
+ : handle(handle_), ptr(ptr_), offset(offset_), size(size_), fixed(fixed_),
+ big_page(big_page_), sparse_alloc(sparse_alloc_) {}
};
struct Allocation {
@@ -212,9 +214,7 @@ private:
bool initialised{};
} vm;
std::shared_ptr<Tegra::MemoryManager> gmmu;
-
- // s32 channel{};
- // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
+ std::unordered_map<DeviceFD, size_t> sessions;
};
} // namespace Service::Nvidia::Devices
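Storing the nvmap handle id in Mapping is what makes the new UnpinHandle calls in FreeMappingLocked and UnmapBuffer possible: the unmap path has to drop the pin taken at map time. A rough sketch, where Unpin is a hypothetical placeholder for nvmap.UnpinHandle:

    #include <cstdint>

    using DAddr = std::uint64_t;
    using HandleId = std::uint32_t;

    void Unpin(HandleId) {} // placeholder for nvmap.UnpinHandle

    struct Mapping {
        HandleId handle;      // newly added: which nvmap handle backs this mapping
        DAddr ptr;            // now a device address rather than a guest VAddr
        std::uint64_t offset;
        std::uint64_t size;
    };

    void UnmapMapping(const Mapping& mapping) {
        // gmmu->Unmap(mapping.offset, mapping.size) happens here in the real code
        Unpin(mapping.handle); // release the pin taken in MapBufferEx/Remap
    }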
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 9ab0ae4d8..78bc5f3c4 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -95,6 +95,9 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
offset += SliceVectors(data, fence_thresholds, params.fence_count, offset);
auto& gpu = system.GPU();
+ //auto& device_memory = system.Host1x().MemoryManager();
+ auto* session = core.GetSession(sessions[fd]);
+
if (gpu.UseNvdec()) {
for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
const SyncptIncr& syncpt_incr = syncpt_increments[i];
@@ -106,7 +109,7 @@ NvResult nvhost_nvdec_common::Submit(IoctlSubmit& params, std::span<u8> data, De
const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
- system.ApplicationMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
+ session->process->GetMemory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
cmdlist.size() * sizeof(u32));
gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
}
@@ -136,7 +139,8 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd) {
const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
for (size_t i = 0; i < num_entries; i++) {
- entries[i].map_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd]);
+ DAddr pin_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd], true);
+ entries[i].map_address = static_cast<u32>(pin_address);
}
return NvResult::Success;
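MapBuffer pins with low_area_pin=true because the ioctl reply stores map_address in a 32-bit field, so the device address must land in the low 4 GiB for the cast to be lossless. A hedged sketch of that constraint; ToMapAddress and its assert are illustrative, as the code above relies on the allocator honouring the flag rather than checking afterwards:

    #include <cassert>
    #include <cstdint>

    using DAddr = std::uint64_t;

    std::uint32_t ToMapAddress(DAddr pinned) {
        // assumption: pinned came from PinHandle(..., session, /*low_area_pin=*/true)
        assert(pinned >> 32 == 0 && "low-area pin must fit the ioctl's u32 field");
        return static_cast<std::uint32_t>(pinned);
    }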
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 2b107f009..7765ca1be 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
return NvResult::InsufficientMemory;
}
- const auto result = file.AllocateHandle(params.handle, params.flags, params.align, params.kind,
- params.address, sessions[fd]);
+ const auto result =
+ handle_description->Alloc(params.flags, params.align, params.kind, params.address);
if (result != NvResult::Success) {
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
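IocAlloc can call Handle::Alloc directly because the scoped_lock added at the top of nvmap.cpp moves locking into Alloc itself, retiring the AllocateHandle wrapper. A minimal sketch of the self-locking pattern:

    #include <mutex>

    struct Handle {
        std::mutex mutex;
        bool allocated{};

        bool Alloc() {
            std::scoped_lock lock(mutex); // locking now lives in the callee
            if (allocated) {
                return false; // handles cannot be allocated twice
            }
            allocated = true;
            return true;
        }
    };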
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index 492ad849a..6e4825313 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -13,8 +13,6 @@
#include "core/hle/service/nvdrv/nvdrv.h"
#include "core/hle/service/nvdrv/nvdrv_interface.h"
-#pragma optimize("", off)
-
namespace Service::Nvidia {
void NVDRV::Open(HLERequestContext& ctx) {
@@ -173,8 +171,8 @@ void NVDRV::Initialize(HLERequestContext& ctx) {
[[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();
auto& container = nvdrv->GetContainer();
- auto process = ctx.GetObjectFromHandle(process_handle);
- session_id = container.OpenSession(process->DynamicCast<Kernel::KProcess*>());
+ auto process = ctx.GetObjectFromHandle<Kernel::KProcess>(process_handle);
+ session_id = container.OpenSession(process.GetPointerUnsafe());
is_initialized = true;
}
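The Initialize change replaces an untyped fetch plus DynamicCast with the templated GetObjectFromHandle<Kernel::KProcess>, which performs the downcast internally and hands back a typed object; GetPointerUnsafe then extracts the raw pointer. A rough analogue, where KAutoObject and KProcess are simplified stand-ins for yuzu's kernel classes of the same names:

    struct KAutoObject { virtual ~KAutoObject() = default; };
    struct KProcess : KAutoObject {};

    template <typename T>
    T* GetObjectFromHandle(KAutoObject* object) {
        // the typed accessor hides the dynamic_cast the old call site did by hand
        return dynamic_cast<T*>(object);
    }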