// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <cstring>

#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
#include "core/hle/service/nvdrv/devices/nvmap.h"

namespace Service {
namespace Nvidia {
namespace Devices {

u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) {
    LOG_DEBUG(Service_NVDRV, "called, command=0x%08x, input_size=0x%llx, output_size=0x%llx",
              command.raw, input.size(), output.size());

    // Dispatch on the raw ioctl number; unhandled commands fall through and report success.
    switch (static_cast<IoctlCommand>(command.raw)) {
    case IoctlCommand::IocInitalizeExCommand:
        return InitalizeEx(input, output);
    case IoctlCommand::IocAllocateSpaceCommand:
        return AllocateSpace(input, output);
    case IoctlCommand::IocMapBufferExCommand:
        return MapBufferEx(input, output);
    case IoctlCommand::IocBindChannelCommand:
        return BindChannel(input, output);
    case IoctlCommand::IocGetVaRegionsCommand:
        return GetVARegions(input, output);
    }
    return 0;
}

u32 nvhost_as_gpu::InitalizeEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlInitalizeEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x%x", params.big_page_size);

    // Stubbed: echo the parameters back to the guest unchanged.
    std::memcpy(output.data(), &params, output.size());
    return 0;
}

u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlAllocSpace params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, pages=%x, page_size=%x, flags=%x", params.pages,
              params.page_size, params.flags);

    const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
    if (params.flags & 1) {
        // Bit 0 of flags requests an allocation at the fixed GPU VA supplied in params.offset.
        params.offset = memory_manager->AllocateSpace(params.offset, size, 1);
    } else {
        params.offset = memory_manager->AllocateSpace(size, params.align);
    }

    std::memcpy(output.data(), &params, output.size());
    return 0;
}

u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlMapBufferEx params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV,
              "called, flags=%x, nvmap_handle=%x, buffer_offset=%lx, mapping_size=%lx, offset=%lx",
              params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
              params.offset);

    // A zero nvmap handle means there is nothing to map.
    if (!params.nvmap_handle) {
        return 0;
    }

    auto object = nvmap_dev->GetObject(params.nvmap_handle);
    ASSERT(object);

    if (params.flags & 1) {
        // Bit 0 of flags requests mapping at the fixed GPU VA supplied in params.offset.
        params.offset = memory_manager->MapBufferEx(object->addr, params.offset, object->size);
    } else {
        params.offset = memory_manager->MapBufferEx(object->addr, object->size);
    }

    std::memcpy(output.data(), &params, output.size());
    return 0;
}

u32 nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlBindChannel params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_DEBUG(Service_NVDRV, "called, fd=%x", params.fd);

    channel = params.fd;

    std::memcpy(output.data(), &params, output.size());
    return 0;
}

u32 nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr=%lx, buf_size=%x", params.buf_addr,
                params.buf_size);

    params.buf_size = 0x30;

    // Report two hard-coded VA regions: one backed by small (4 KiB) pages and one by big
    // (64 KiB) pages.
    params.regions[0].offset = 0x04000000;
    params.regions[0].page_size = 0x1000;
    params.regions[0].pages = 0x3fbfff;

    params.regions[1].offset = 0x04000000;
    params.regions[1].page_size = 0x10000;
    params.regions[1].pages = 0x1bffff;

    // TODO(ogniK): This probably can stay stubbed but should add support way way later
    std::memcpy(output.data(), &params, output.size());
    return 0;
}

} // namespace Devices
} // namespace Nvidia
} // namespace Service