Diffstat (limited to 'src/video_core/gpu.cpp')
-rw-r--r--  src/video_core/gpu.cpp | 95
1 file changed, 83 insertions(+), 12 deletions(-)
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index e91f52938..6ab06775f 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -10,6 +10,7 @@
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/frontend/emu_window.h"
+#include "core/hardware_interrupt_manager.h"
#include "core/memory.h"
#include "core/settings.h"
#include "video_core/engines/fermi_2d.h"
@@ -36,7 +37,8 @@ GPU::GPU(Core::System& system_, bool is_async_, bool use_nvdec_)
kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
- shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_} {}
+ shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
+ gpu_thread{system_, is_async_} {}
GPU::~GPU() = default;
@@ -198,10 +200,6 @@ void GPU::SyncGuestHost() {
renderer->Rasterizer().SyncGuestHost();
}
-void GPU::OnCommandListEnd() {
- renderer->Rasterizer().ReleaseFences();
-}
-
enum class GpuSemaphoreOperation {
AcquireEqual = 0x1,
WriteLong = 0x2,
@@ -232,8 +230,12 @@ void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32
CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
} else {
for (std::size_t i = 0; i < amount; i++) {
- CallPullerMethod(
- {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
+ CallPullerMethod(MethodCall{
+ method,
+ base_start[i],
+ subchannel,
+ methods_pending - static_cast<u32>(i),
+ });
}
}
}
@@ -295,8 +297,7 @@ void GPU::CallPullerMethod(const MethodCall& method_call) {
break;
}
default:
- LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented",
- static_cast<u32>(method));
+ LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
break;
}
}
@@ -375,7 +376,7 @@ void GPU::ProcessBindMethod(const MethodCall& method_call) {
dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
break;
default:
- UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", static_cast<u32>(engine_id));
+ UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
}
}
@@ -388,8 +389,7 @@ void GPU::ProcessFenceActionMethod() {
IncrementSyncPoint(regs.fence_action.syncpoint_id);
break;
default:
- UNIMPLEMENTED_MSG("Unimplemented operation {}",
- static_cast<u32>(regs.fence_action.op.Value()));
+ UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
}
}
@@ -459,4 +459,75 @@ void GPU::ProcessSemaphoreAcquire() {
}
}
+void GPU::Start() {
+ gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher, *cdma_pusher);
+ cpu_context = renderer->GetRenderWindow().CreateSharedContext();
+ cpu_context->MakeCurrent();
+}
+
+void GPU::ObtainContext() {
+ cpu_context->MakeCurrent();
+}
+
+void GPU::ReleaseContext() {
+ cpu_context->DoneCurrent();
+}
+
+void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
+ gpu_thread.SubmitList(std::move(entries));
+}
+
+void GPU::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) {
+ if (!use_nvdec) {
+ return;
+ }
+ // This condition fires when a video stream ends; clear all intermediary data
+ if (entries[0].raw == 0xDEADB33F) {
+ cdma_pusher.reset();
+ return;
+ }
+ if (!cdma_pusher) {
+ cdma_pusher = std::make_unique<Tegra::CDmaPusher>(*this);
+ }
+
+ // SubmitCommandBuffer would make the nvdec operations async, but this is not currently working
+ // TODO(ameerj): RE proper async nvdec operation
+ // gpu_thread.SubmitCommandBuffer(std::move(entries));
+
+ cdma_pusher->Push(std::move(entries));
+ cdma_pusher->DispatchCalls();
+}
+
+void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
+ gpu_thread.SwapBuffers(framebuffer);
+}
+
+void GPU::FlushRegion(VAddr addr, u64 size) {
+ gpu_thread.FlushRegion(addr, size);
+}
+
+void GPU::InvalidateRegion(VAddr addr, u64 size) {
+ gpu_thread.InvalidateRegion(addr, size);
+}
+
+void GPU::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+ gpu_thread.FlushAndInvalidateRegion(addr, size);
+}
+
+void GPU::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const {
+ auto& interrupt_manager = system.InterruptManager();
+ interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
+}
+
+void GPU::WaitIdle() const {
+ gpu_thread.WaitIdle();
+}
+
+void GPU::OnCommandListEnd() {
+ if (is_async) {
+ // This command only applies to asynchronous GPU mode
+ gpu_thread.OnCommandListEnd();
+ }
+}
+
} // namespace Tegra
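
Note (not part of the commit): the added functions above form the GPU's public lifecycle and submission interface. The following is a minimal sketch of how a caller might exercise it; the RunFrame() wrapper and the include path are assumptions, while the Tegra::GPU member calls and the 0xDEADB33F stream-end sentinel come from the diff itself.

// Illustrative sketch only; assumes a fully constructed Tegra::GPU created
// with use_nvdec enabled, and that GPU::Start() has already been called once
// at boot to spawn the GPU thread and create the shared CPU-side context.
#include <utility>

#include "video_core/gpu.h"  // assumed header location for Tegra::GPU

void RunFrame(Tegra::GPU& gpu, Tegra::CommandList&& commands,
              Tegra::ChCommandHeaderList& nvdec_buffer) {
    gpu.ObtainContext();                      // make the CPU context current on this thread
    gpu.PushGPUEntries(std::move(commands));  // queue a command list for the GPU thread

    // nvdec path: a buffer whose first header word is 0xDEADB33F tears down the
    // CDmaPusher (video stream ended); otherwise the buffer is pushed and dispatched.
    gpu.PushCommandBuffer(nvdec_buffer);

    gpu.ReleaseContext();                     // unbind before another thread takes over
}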