Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 10
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 24
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 51
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h | 25
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp | 37
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 25
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 17
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_command_pool.cpp | 46
-rw-r--r--  src/video_core/renderer_vulkan/vk_command_pool.h | 34
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 23
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 19
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 32
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp | 18
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_master_semaphore.cpp | 56
-rw-r--r--  src/video_core/renderer_vulkan/vk_master_semaphore.h | 70
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 95
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 29
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 57
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 23
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 142
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 20
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp | 311
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h | 196
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_pool.cpp | 63
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_pool.h | 43
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 77
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 70
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 49
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 14
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp | 19
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h | 28
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 12
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 20
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.h | 9
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 25
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 19
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp | 71
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.h | 48
46 files changed, 921 insertions, 1057 deletions
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 81a39a3b8..da5c550ea 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -58,6 +58,7 @@ void FixedPipelineState::Fill(const Maxwell& regs, bool has_extended_dynamic_sta
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
logic_op.Assign(PackLogicOp(regs.logic_op.operation));
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
+ topology.Assign(regs.draw.topology);
std::memcpy(&point_size, &regs.point_size, sizeof(point_size)); // TODO: C++20 std::bit_cast
@@ -131,7 +132,6 @@ void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size
}
void FixedPipelineState::DynamicState::Fill(const Maxwell& regs) {
- const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
u32 packed_front_face = PackFrontFace(regs.front_face);
if (regs.screen_y_control.triangle_rast_flip != 0) {
// Flip front face
@@ -161,7 +161,6 @@ void FixedPipelineState::DynamicState::Fill(const Maxwell& regs) {
depth_test_enable.Assign(regs.depth_test_enable);
front_face.Assign(packed_front_face);
depth_test_func.Assign(PackComparisonOp(regs.depth_test_func));
- topology.Assign(topology_index);
cull_face.Assign(PackCullFace(regs.cull_face));
cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index cdcbb65f5..2c18eeaae 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -150,9 +150,8 @@ struct FixedPipelineState {
};
union {
u32 raw2;
- BitField<0, 4, u32> topology;
- BitField<4, 2, u32> cull_face;
- BitField<6, 1, u32> cull_enable;
+ BitField<0, 2, u32> cull_face;
+ BitField<2, 1, u32> cull_enable;
};
std::array<VertexBinding, Maxwell::NumVertexArrays> vertex_bindings;
@@ -169,10 +168,6 @@ struct FixedPipelineState {
Maxwell::FrontFace FrontFace() const noexcept {
return UnpackFrontFace(front_face.Value());
}
-
- constexpr Maxwell::PrimitiveTopology Topology() const noexcept {
- return static_cast<Maxwell::PrimitiveTopology>(topology.Value());
- }
};
union {
@@ -190,6 +185,7 @@ struct FixedPipelineState {
BitField<18, 1, u32> logic_op_enable;
BitField<19, 4, u32> logic_op;
BitField<23, 1, u32> rasterize_enable;
+ BitField<24, 4, Maxwell::PrimitiveTopology> topology;
};
u32 point_size;
std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
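Note: topology now lives in the static half of FixedPipelineState, packed directly as a 4-bit BitField of the Maxwell enum, so it is hashed with the rest of the immutable pipeline key; the matching VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT entry is dropped from vk_graphics_pipeline.cpp further down. A minimal sketch of the packed-enum access pattern, assuming the BitField<position, bits, T> helper from common/bit_field.h that this header already uses:

    // Sketch only, not the exact engine code; BitField<pos, size, T> overlays
    // bits [pos, pos + size) of the union's u32 and converts to/from T.
    union PackedRaster {
        u32 raw;
        BitField<23, 1, u32> rasterize_enable;
        BitField<24, 4, Maxwell::PrimitiveTopology> topology;
    };

    // Fill time:  state.topology.Assign(regs.draw.topology);
    // Use time:   MaxwellToVK::PrimitiveTopology(device, state.topology);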
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index f8c77f4fa..d22de1d81 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -78,9 +78,10 @@ VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode w
case Tegra::Texture::WrapMode::MirrorOnceBorder:
UNIMPLEMENTED();
return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
+ return {};
}
- UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
- return {};
}
VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
@@ -298,9 +299,10 @@ VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
case Maxwell::PrimitiveTopology::Patches:
return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
+ return {};
}
- UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
- return {};
}
VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
@@ -325,6 +327,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R16G16B16A16_UNORM;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::SignedNorm:
@@ -347,6 +351,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R16G16B16A16_SNORM;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::UnsignedScaled:
@@ -369,6 +375,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R16G16B16A16_USCALED;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_USCALED_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::SignedScaled:
@@ -391,6 +399,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R16G16B16A16_SSCALED;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_SSCALED_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::UnsignedInt:
@@ -421,6 +431,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R32G32B32A32_UINT;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_UINT_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::SignedInt:
@@ -451,6 +463,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R32G32B32A32_SINT;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
return VK_FORMAT_A2B10G10R10_SINT_PACK32;
+ default:
+ break;
}
break;
case Maxwell::VertexAttribute::Type::Float:
@@ -471,6 +485,8 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib
return VK_FORMAT_R32G32B32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
return VK_FORMAT_R32G32B32A32_SFLOAT;
+ default:
+ break;
}
break;
}
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7ffc90cd0..f2610868e 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -25,9 +25,9 @@
#include "video_core/renderer_vulkan/renderer_vulkan.h"
#include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
@@ -56,7 +56,7 @@ VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
VkDebugUtilsMessageTypeFlagsEXT type,
const VkDebugUtilsMessengerCallbackDataEXT* data,
[[maybe_unused]] void* user_data) {
- const char* message{data->pMessage};
+ const char* const message{data->pMessage};
if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
LOG_CRITICAL(Render_Vulkan, "{}", message);
@@ -86,7 +86,7 @@ Common::DynamicLibrary OpenVulkanLibrary() {
if (!library.Open(filename.c_str())) {
// Android devices may not have libvulkan.so.1, only libvulkan.so.
filename = Common::DynamicLibrary::GetVersionedFilename("vulkan");
- library.Open(filename.c_str());
+ (void)library.Open(filename.c_str());
}
#endif
return library;
@@ -240,8 +240,12 @@ std::string BuildCommaSeparatedExtensions(std::vector<std::string> available_ext
} // Anonymous namespace
-RendererVulkan::RendererVulkan(Core::Frontend::EmuWindow& window, Core::System& system)
- : RendererBase(window), system{system} {}
+RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
+ Core::Frontend::EmuWindow& emu_window,
+ Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
+ std::unique_ptr<Core::Frontend::GraphicsContext> context)
+ : RendererBase{emu_window, std::move(context)}, telemetry_session{telemetry_session_},
+ cpu_memory{cpu_memory_}, gpu{gpu_} {}
RendererVulkan::~RendererVulkan() {
ShutDown();
@@ -268,11 +272,11 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
scheduler->WaitWorker();
swapchain->AcquireNextImage();
- const auto [fence, render_semaphore] = blit_screen->Draw(*framebuffer, use_accelerated);
+ const VkSemaphore render_semaphore = blit_screen->Draw(*framebuffer, use_accelerated);
- scheduler->Flush(false, render_semaphore);
+ scheduler->Flush(render_semaphore);
- if (swapchain->Present(render_semaphore, fence)) {
+ if (swapchain->Present(render_semaphore)) {
blit_screen->Recreate();
}
@@ -282,11 +286,6 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
render_window.PollEvents();
}
-bool RendererVulkan::TryPresent(int /*timeout_ms*/) {
- // TODO (bunnei): ImplementMe
- return true;
-}
-
bool RendererVulkan::Init() {
library = OpenVulkanLibrary();
std::tie(instance, instance_version) = CreateInstance(
@@ -299,23 +298,21 @@ bool RendererVulkan::Init() {
memory_manager = std::make_unique<VKMemoryManager>(*device);
- resource_manager = std::make_unique<VKResourceManager>(*device);
+ state_tracker = std::make_unique<StateTracker>(gpu);
+
+ scheduler = std::make_unique<VKScheduler>(*device, *state_tracker);
const auto& framebuffer = render_window.GetFramebufferLayout();
- swapchain = std::make_unique<VKSwapchain>(*surface, *device);
+ swapchain = std::make_unique<VKSwapchain>(*surface, *device, *scheduler);
swapchain->Create(framebuffer.width, framebuffer.height, false);
- state_tracker = std::make_unique<StateTracker>(system);
-
- scheduler = std::make_unique<VKScheduler>(*device, *resource_manager, *state_tracker);
-
- rasterizer = std::make_unique<RasterizerVulkan>(system, render_window, screen_info, *device,
- *resource_manager, *memory_manager,
- *state_tracker, *scheduler);
+ rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(),
+ cpu_memory, screen_info, *device,
+ *memory_manager, *state_tracker, *scheduler);
- blit_screen = std::make_unique<VKBlitScreen>(system, render_window, *rasterizer, *device,
- *resource_manager, *memory_manager, *swapchain,
- *scheduler, screen_info);
+ blit_screen =
+ std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device,
+ *memory_manager, *swapchain, *scheduler, screen_info);
return true;
}
@@ -333,7 +330,6 @@ void RendererVulkan::ShutDown() {
scheduler.reset();
swapchain.reset();
memory_manager.reset();
- resource_manager.reset();
device.reset();
}
@@ -442,8 +438,7 @@ void RendererVulkan::Report() const {
LOG_INFO(Render_Vulkan, "Device: {}", model_name);
LOG_INFO(Render_Vulkan, "Vulkan: {}", api_version);
- auto& telemetry_session = system.TelemetrySession();
- constexpr auto field = Common::Telemetry::FieldType::UserSystem;
+ static constexpr auto field = Common::Telemetry::FieldType::UserSystem;
telemetry_session.AddField(field, "GPU_Vendor", vendor_name);
telemetry_session.AddField(field, "GPU_Model", model_name);
telemetry_session.AddField(field, "GPU_Vulkan_Driver", driver_name);
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 9617a93e9..1044ca124 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -14,7 +14,15 @@
#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
-class System;
+class TelemetrySession;
+}
+
+namespace Core::Memory {
+class Memory;
+}
+
+namespace Tegra {
+class GPU;
}
namespace Vulkan {
@@ -22,9 +30,7 @@ namespace Vulkan {
class StateTracker;
class VKBlitScreen;
class VKDevice;
-class VKFence;
class VKMemoryManager;
-class VKResourceManager;
class VKSwapchain;
class VKScheduler;
class VKImage;
@@ -38,13 +44,15 @@ struct VKScreenInfo {
class RendererVulkan final : public VideoCore::RendererBase {
public:
- explicit RendererVulkan(Core::Frontend::EmuWindow& window, Core::System& system);
+ explicit RendererVulkan(Core::TelemetrySession& telemetry_session,
+ Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory,
+ Tegra::GPU& gpu,
+ std::unique_ptr<Core::Frontend::GraphicsContext> context);
~RendererVulkan() override;
bool Init() override;
void ShutDown() override;
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
- bool TryPresent(int timeout_ms) override;
static std::vector<std::string> EnumerateDevices();
@@ -57,7 +65,9 @@ private:
void Report() const;
- Core::System& system;
+ Core::TelemetrySession& telemetry_session;
+ Core::Memory::Memory& cpu_memory;
+ Tegra::GPU& gpu;
Common::DynamicLibrary library;
vk::InstanceDispatch dld;
@@ -71,11 +81,10 @@ private:
vk::DebugCallback debug_callback;
std::unique_ptr<VKDevice> device;
- std::unique_ptr<VKSwapchain> swapchain;
std::unique_ptr<VKMemoryManager> memory_manager;
- std::unique_ptr<VKResourceManager> resource_manager;
std::unique_ptr<StateTracker> state_tracker;
std::unique_ptr<VKScheduler> scheduler;
+ std::unique_ptr<VKSwapchain> swapchain;
std::unique_ptr<VKBlitScreen> blit_screen;
};
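Note: the renderer no longer reaches into Core::System; its collaborators are passed explicitly. A hedged construction sketch (the real call site lives in the video_core factory, outside this diff, and the local names below are illustrative assumptions):

    // Hypothetical caller; 'system', 'emu_window' and 'context' are assumptions.
    auto renderer = std::make_unique<Vulkan::RendererVulkan>(
        system.TelemetrySession(), emu_window, system.Memory(), system.GPU(),
        std::move(context));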
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index a551e3de8..b5b60309e 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -12,11 +12,9 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/math_util.h"
-
#include "core/core.h"
#include "core/frontend/emu_window.h"
#include "core/memory.h"
-
#include "video_core/gpu.h"
#include "video_core/morton.h"
#include "video_core/rasterizer_interface.h"
@@ -24,8 +22,8 @@
#include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_image.h"
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
@@ -210,17 +208,15 @@ struct VKBlitScreen::BufferData {
// Unaligned image data goes here
};
-VKBlitScreen::VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window,
- VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
- VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
- VKSwapchain& swapchain, VKScheduler& scheduler,
- const VKScreenInfo& screen_info)
- : system{system}, render_window{render_window}, rasterizer{rasterizer}, device{device},
- resource_manager{resource_manager}, memory_manager{memory_manager}, swapchain{swapchain},
- scheduler{scheduler}, image_count{swapchain.GetImageCount()}, screen_info{screen_info} {
- watches.resize(image_count);
- std::generate(watches.begin(), watches.end(),
- []() { return std::make_unique<VKFenceWatch>(); });
+VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_,
+ Core::Frontend::EmuWindow& render_window_,
+ VideoCore::RasterizerInterface& rasterizer_, const VKDevice& device_,
+ VKMemoryManager& memory_manager_, VKSwapchain& swapchain_,
+ VKScheduler& scheduler_, const VKScreenInfo& screen_info_)
+ : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_},
+ device{device_}, memory_manager{memory_manager_}, swapchain{swapchain_},
+ scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
+ resource_ticks.resize(image_count);
CreateStaticResources();
CreateDynamicResources();
@@ -232,15 +228,16 @@ void VKBlitScreen::Recreate() {
CreateDynamicResources();
}
-std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
- bool use_accelerated) {
+VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool use_accelerated) {
RefreshResources(framebuffer);
// Finish any pending renderpass
scheduler.RequestOutsideRenderPassOperationContext();
const std::size_t image_index = swapchain.GetImageIndex();
- watches[image_index]->Watch(scheduler.GetFence());
+
+ scheduler.Wait(resource_ticks[image_index]);
+ resource_ticks[image_index] = scheduler.CurrentTick();
VKImage* blit_image = use_accelerated ? screen_info.image : raw_images[image_index].get();
@@ -259,7 +256,7 @@ std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferCon
const auto pixel_format =
VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format);
const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset;
- const auto host_ptr = system.Memory().GetPointer(framebuffer_addr);
+ const auto host_ptr = cpu_memory.GetPointer(framebuffer_addr);
rasterizer.FlushRegion(ToCacheAddr(host_ptr), GetSizeInBytes(framebuffer));
// TODO(Rodrigo): Read this from HLE
@@ -343,7 +340,7 @@ std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferCon
cmdbuf.EndRenderPass();
});
- return {scheduler.GetFence(), *semaphores[image_index]};
+ return *semaphores[image_index];
}
void VKBlitScreen::CreateStaticResources() {
@@ -711,7 +708,7 @@ void VKBlitScreen::CreateFramebuffers() {
void VKBlitScreen::ReleaseRawImages() {
for (std::size_t i = 0; i < raw_images.size(); ++i) {
- watches[i]->Wait();
+ scheduler.Wait(resource_ticks.at(i));
}
raw_images.clear();
raw_buffer_commits.clear();
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 243640fab..8f2839214 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -5,16 +5,18 @@
#pragma once
#include <memory>
-#include <tuple>
#include "video_core/renderer_vulkan/vk_memory_manager.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
class System;
}
+namespace Core::Memory {
+class Memory;
+}
+
namespace Core::Frontend {
class EmuWindow;
}
@@ -30,26 +32,26 @@ class RasterizerInterface;
namespace Vulkan {
struct ScreenInfo;
+
class RasterizerVulkan;
class VKDevice;
-class VKFence;
class VKImage;
class VKScheduler;
class VKSwapchain;
class VKBlitScreen final {
public:
- explicit VKBlitScreen(Core::System& system, Core::Frontend::EmuWindow& render_window,
+ explicit VKBlitScreen(Core::Memory::Memory& cpu_memory,
+ Core::Frontend::EmuWindow& render_window,
VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
- VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
- VKSwapchain& swapchain, VKScheduler& scheduler,
- const VKScreenInfo& screen_info);
+ VKMemoryManager& memory_manager, VKSwapchain& swapchain,
+ VKScheduler& scheduler, const VKScreenInfo& screen_info);
~VKBlitScreen();
void Recreate();
- std::tuple<VKFence&, VkSemaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
- bool use_accelerated);
+ [[nodiscard]] VkSemaphore Draw(const Tegra::FramebufferConfig& framebuffer,
+ bool use_accelerated);
private:
struct BufferData;
@@ -81,11 +83,10 @@ private:
u64 GetRawImageOffset(const Tegra::FramebufferConfig& framebuffer,
std::size_t image_index) const;
- Core::System& system;
+ Core::Memory::Memory& cpu_memory;
Core::Frontend::EmuWindow& render_window;
VideoCore::RasterizerInterface& rasterizer;
const VKDevice& device;
- VKResourceManager& resource_manager;
VKMemoryManager& memory_manager;
VKSwapchain& swapchain;
VKScheduler& scheduler;
@@ -106,7 +107,7 @@ private:
vk::Buffer buffer;
VKMemoryCommit buffer_commit;
- std::vector<std::unique_ptr<VKFenceWatch>> watches;
+ std::vector<u64> resource_ticks;
std::vector<vk::Semaphore> semaphores;
std::vector<std::unique_ptr<VKImage>> raw_images;
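Note: the per-swapchain-image VKFenceWatch objects become one u64 tick per image. Before an image's buffers are reused, Draw() waits on the scheduler for the tick recorded at the image's previous use and then stamps the slot with the current tick. A standalone model of that recycle pattern (TickSource stands in for VKScheduler's tick interface and is an assumption, not engine code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for the scheduler: CurrentTick() is the tick of the batch being
    // recorded, Wait() blocks until the GPU has signalled the given tick.
    struct TickSource {
        uint64_t current = 1;
        uint64_t CurrentTick() const { return current; }
        void Wait(uint64_t /*tick*/) { /* block until the GPU reaches 'tick' */ }
    };

    struct PerImageResources {
        explicit PerImageResources(std::size_t image_count) : ticks(image_count, 0) {}

        // Called once per frame for the acquired swapchain image, mirroring
        // VKBlitScreen::Draw() above.
        void Reuse(TickSource& scheduler, std::size_t image_index) {
            scheduler.Wait(ticks[image_index]);           // previous use finished?
            ticks[image_index] = scheduler.CurrentTick(); // stamp this use
        }

        std::vector<uint64_t> ticks; // mirrors VKBlitScreen::resource_ticks
    };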
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 1d2f8b557..d9d3da9ea 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -145,14 +145,15 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst
});
}
-VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
- const VKDevice& device, VKMemoryManager& memory_manager,
- VKScheduler& scheduler, VKStagingBufferPool& staging_pool)
- : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system,
- CreateStreamBuffer(device,
- scheduler)},
- device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{
- staging_pool} {}
+VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
+ const VKDevice& device_, VKMemoryManager& memory_manager_,
+ VKScheduler& scheduler_, VKStagingBufferPool& staging_pool_)
+ : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, gpu_memory, cpu_memory,
+ CreateStreamBuffer(device_,
+ scheduler_)},
+ device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
+ staging_pool_} {}
VKBufferCache::~VKBufferCache() = default;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 991ee451c..7fb5ceedf 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -13,10 +13,6 @@
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/renderer_vulkan/wrapper.h"
-namespace Core {
-class System;
-}
-
namespace Vulkan {
class VKDevice;
@@ -53,7 +49,8 @@ private:
class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
public:
- explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
+ explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
~VKBufferCache();
diff --git a/src/video_core/renderer_vulkan/vk_command_pool.cpp b/src/video_core/renderer_vulkan/vk_command_pool.cpp
new file mode 100644
index 000000000..6339f4fe0
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_command_pool.cpp
@@ -0,0 +1,46 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstddef>
+
+#include "video_core/renderer_vulkan/vk_command_pool.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+constexpr size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
+
+struct CommandPool::Pool {
+ vk::CommandPool handle;
+ vk::CommandBuffers cmdbufs;
+};
+
+CommandPool::CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device)
+ : ResourcePool(master_semaphore, COMMAND_BUFFER_POOL_SIZE), device{device} {}
+
+CommandPool::~CommandPool() = default;
+
+void CommandPool::Allocate(size_t begin, size_t end) {
+ // Command buffers are going to be committed, recorded, and executed every single usage cycle.
+ // They are also going to be reset when committed.
+ Pool& pool = pools.emplace_back();
+ pool.handle = device.GetLogical().CreateCommandPool({
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .pNext = nullptr,
+ .flags =
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ .queueFamilyIndex = device.GetGraphicsFamily(),
+ });
+ pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
+}
+
+VkCommandBuffer CommandPool::Commit() {
+ const size_t index = CommitResource();
+ const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
+ const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
+ return pools[pool_index].cmdbufs[sub_index];
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_command_pool.h b/src/video_core/renderer_vulkan/vk_command_pool.h
new file mode 100644
index 000000000..b9cb3fb5d
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_command_pool.h
@@ -0,0 +1,34 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <vector>
+
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+class MasterSemaphore;
+class VKDevice;
+
+class CommandPool final : public ResourcePool {
+public:
+ explicit CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device);
+ ~CommandPool() override;
+
+ void Allocate(size_t begin, size_t end) override;
+
+ VkCommandBuffer Commit();
+
+private:
+ struct Pool;
+
+ const VKDevice& device;
+ std::vector<Pool> pools;
+};
+
+} // namespace Vulkan
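Note: CommandPool grows in chunks of COMMAND_BUFFER_POOL_SIZE command buffers; Commit() asks the ResourcePool base (gated on the master semaphore) for a free flat index and maps it back to a chunk and an offset. The index math in isolation:

    #include <cstddef>
    #include <utility>

    constexpr std::size_t POOL_SIZE = 0x1000; // COMMAND_BUFFER_POOL_SIZE above

    // Same mapping CommandPool::Commit() performs on the committed index.
    constexpr std::pair<std::size_t, std::size_t> Locate(std::size_t index) {
        return {index / POOL_SIZE, index % POOL_SIZE};
    }

    static_assert(Locate(0x1001).first == 1 && Locate(0x1001).second == 1);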
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 182461ed9..9637c6059 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -112,7 +112,8 @@ constexpr u8 quad_array[] = {
0xf9, 0x00, 0x02, 0x00, 0x21, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x23, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4e, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00,
- 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+ 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+};
VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
return {
@@ -218,7 +219,8 @@ constexpr u8 uint8_pass[] = {
0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00,
0x24, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x03, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00,
- 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+ 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+};
// Quad indexed SPIR-V module. Generated from the "shaders/" directory.
constexpr u8 QUAD_INDEXED_SPV[] = {
@@ -341,7 +343,8 @@ constexpr u8 QUAD_INDEXED_SPV[] = {
0xf9, 0x00, 0x02, 0x00, 0x35, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x37, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x76, 0x00, 0x00, 0x00,
0xf9, 0x00, 0x02, 0x00, 0x74, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x73, 0x00, 0x00, 0x00,
- 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+ 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00,
+};
std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
return {{
@@ -448,12 +451,12 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
VKComputePass::~VKComputePass() = default;
-VkDescriptorSet VKComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
- VKFence& fence) {
+VkDescriptorSet VKComputePass::CommitDescriptorSet(
+ VKUpdateDescriptorQueue& update_descriptor_queue) {
if (!descriptor_template) {
return nullptr;
}
- const auto set = descriptor_allocator->Commit(fence);
+ const VkDescriptorSet set = descriptor_allocator->Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
@@ -477,7 +480,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
- const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
+ const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext();
@@ -520,13 +523,13 @@ Uint8Pass::~Uint8Pass() = default;
std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
u64 src_offset) {
- const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
+ const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
- const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
+ const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
@@ -589,7 +592,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size);
- const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
+ const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 230b526bc..acc94f27e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -15,7 +15,6 @@
namespace Vulkan {
class VKDevice;
-class VKFence;
class VKScheduler;
class VKStagingBufferPool;
class VKUpdateDescriptorQueue;
@@ -30,8 +29,7 @@ public:
~VKComputePass();
protected:
- VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
- VKFence& fence);
+ VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
vk::DescriptorUpdateTemplateKHR descriptor_template;
vk::PipelineLayout layout;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index ed9d2991c..9be72dc9b 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -32,7 +32,7 @@ VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
if (!descriptor_template) {
return {};
}
- const auto set = descriptor_allocator.Commit(scheduler.GetFence());
+ const VkDescriptorSet set = descriptor_allocator.Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index ac4a0884e..f38e089d5 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -7,7 +7,8 @@
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -15,14 +16,15 @@ namespace Vulkan {
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines.
constexpr std::size_t SETS_GROW_RATE = 0x20;
-DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool,
- VkDescriptorSetLayout layout)
- : VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {}
+DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool_,
+ VkDescriptorSetLayout layout_)
+ : ResourcePool(descriptor_pool_.master_semaphore, SETS_GROW_RATE),
+ descriptor_pool{descriptor_pool_}, layout{layout_} {}
DescriptorAllocator::~DescriptorAllocator() = default;
-VkDescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
- const std::size_t index = CommitResource(fence);
+VkDescriptorSet DescriptorAllocator::Commit() {
+ const std::size_t index = CommitResource();
return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
}
@@ -30,8 +32,9 @@ void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
}
-VKDescriptorPool::VKDescriptorPool(const VKDevice& device)
- : device{device}, active_pool{AllocateNewPool()} {}
+VKDescriptorPool::VKDescriptorPool(const VKDevice& device_, VKScheduler& scheduler)
+ : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
+ AllocateNewPool()} {}
VKDescriptorPool::~VKDescriptorPool() = default;
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index 9efa66bef..544f32a20 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -6,21 +6,24 @@
#include <vector>
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
+class VKDevice;
class VKDescriptorPool;
+class VKScheduler;
-class DescriptorAllocator final : public VKFencedPool {
+class DescriptorAllocator final : public ResourcePool {
public:
explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
~DescriptorAllocator() override;
+ DescriptorAllocator& operator=(const DescriptorAllocator&) = delete;
DescriptorAllocator(const DescriptorAllocator&) = delete;
- VkDescriptorSet Commit(VKFence& fence);
+ VkDescriptorSet Commit();
protected:
void Allocate(std::size_t begin, std::size_t end) override;
@@ -36,15 +39,19 @@ class VKDescriptorPool final {
friend DescriptorAllocator;
public:
- explicit VKDescriptorPool(const VKDevice& device);
+ explicit VKDescriptorPool(const VKDevice& device, VKScheduler& scheduler);
~VKDescriptorPool();
+ VKDescriptorPool(const VKDescriptorPool&) = delete;
+ VKDescriptorPool& operator=(const VKDescriptorPool&) = delete;
+
private:
vk::DescriptorPool* AllocateNewPool();
vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
const VKDevice& device;
+ MasterSemaphore& master_semaphore;
std::vector<vk::DescriptorPool> pools;
vk::DescriptorPool* active_pool;
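Note: DescriptorAllocator is now a ResourcePool client of the scheduler's MasterSemaphore rather than of VKFence, so VKDescriptorPool needs the scheduler at construction and Commit() loses its fence parameter. Sketch of the new call pattern, with surrounding setup elided (names taken from this diff):

    // VKDescriptorPool descriptor_pool(device, scheduler);
    // DescriptorAllocator allocator(descriptor_pool, *descriptor_set_layout);
    // ...
    // const VkDescriptorSet set = allocator.Commit();   // no VKFence argument anymore
    // update_descriptor_queue.Send(*descriptor_template, set);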
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 90916ee0e..e1217ca83 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -45,6 +45,7 @@ constexpr std::array REQUIRED_EXTENSIONS{
VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+ VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
@@ -81,6 +82,21 @@ VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType
}
}
+[[nodiscard]] bool IsRDNA(std::string_view device_name, VkDriverIdKHR driver_id) {
+ static constexpr std::array RDNA_DEVICES{
+ "5700",
+ "5600",
+ "5500",
+ "5300",
+ };
+ if (driver_id != VK_DRIVER_ID_AMD_PROPRIETARY_KHR) {
+ return false;
+ }
+ return std::any_of(RDNA_DEVICES.begin(), RDNA_DEVICES.end(), [device_name](const char* name) {
+ return device_name.find(name) != std::string_view::npos;
+ });
+}
+
std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
static constexpr std::array formats{
@@ -253,6 +269,13 @@ bool VKDevice::Create() {
.inheritedQueries = false,
};
+ VkPhysicalDeviceTimelineSemaphoreFeaturesKHR timeline_semaphore{
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR,
+ .pNext = nullptr,
+ .timelineSemaphore = true,
+ };
+ SetNext(next, timeline_semaphore);
+
VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR,
.pNext = nullptr,
@@ -383,6 +406,15 @@ bool VKDevice::Create() {
CollectTelemetryParameters();
+ if (ext_extended_dynamic_state && IsRDNA(properties.deviceName, driver_id)) {
+ // AMD's proprietary driver supports VK_EXT_extended_dynamic_state but on RDNA devices it
+ // seems to cause stability issues
+ LOG_WARNING(
+ Render_Vulkan,
+ "Blacklisting AMD proprietary on RDNA devices from VK_EXT_extended_dynamic_state");
+ ext_extended_dynamic_state = false;
+ }
+
graphics_queue = logical.GetQueue(graphics_family);
present_queue = logical.GetQueue(present_family);
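Note: IsRDNA() gates the VK_EXT_extended_dynamic_state blacklist by substring-matching the reported device name against known RDNA model numbers, and only for AMD's proprietary driver. Illustrative inputs (the device-name strings below are examples, not values from this diff):

    // true:  proprietary AMD driver and the name contains "5700"
    IsRDNA("AMD Radeon RX 5700 XT", VK_DRIVER_ID_AMD_PROPRIETARY_KHR);
    // false: no RDNA model number in the name
    IsRDNA("AMD Radeon RX Vega 64", VK_DRIVER_ID_AMD_PROPRIETARY_KHR);
    // false: wrong driver id; RADV, NVIDIA, etc. are not blacklisted here
    IsRDNA("GeForce RTX 2070", VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR);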
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index d7f65d435..5babbdd0b 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -29,8 +29,8 @@ void InnerFence::Queue() {
}
ASSERT(!event);
- event = device.GetLogical().CreateNewEvent();
- ticks = scheduler.Ticks();
+ event = device.GetLogical().CreateEvent();
+ ticks = scheduler.CurrentTick();
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
@@ -52,7 +52,7 @@ void InnerFence::Wait() {
}
ASSERT(event);
- if (ticks >= scheduler.Ticks()) {
+ if (ticks >= scheduler.CurrentTick()) {
scheduler.Flush();
}
while (!IsEventSignalled()) {
@@ -71,12 +71,12 @@ bool InnerFence::IsEventSignalled() const {
}
}
-VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- const VKDevice& device, VKScheduler& scheduler,
- VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
- VKQueryCache& query_cache)
- : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache),
- device{device}, scheduler{scheduler} {}
+VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
+ Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache,
+ VKBufferCache& buffer_cache, VKQueryCache& query_cache,
+ const VKDevice& device_, VKScheduler& scheduler_)
+ : GenericFenceManager(rasterizer, gpu, texture_cache, buffer_cache, query_cache),
+ device{device_}, scheduler{scheduler_} {}
Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 043fe7947..1547d6d30 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -55,10 +55,10 @@ using GenericFenceManager =
class VKFenceManager final : public GenericFenceManager {
public:
- explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- const VKDevice& device, VKScheduler& scheduler,
- VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
- VKQueryCache& query_cache);
+ explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer, Tegra::GPU& gpu,
+ Tegra::MemoryManager& memory_manager, VKTextureCache& texture_cache,
+ VKBufferCache& buffer_cache, VKQueryCache& query_cache,
+ const VKDevice& device, VKScheduler& scheduler);
protected:
Fence CreateFence(u32 value, bool is_stubbed) override;
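Note: InnerFence now remembers the scheduler tick at which its set-event command was recorded. Because CurrentTick() only moves past that value once the batch is submitted, `ticks >= scheduler.CurrentTick()` in Wait() means the command buffer is still being recorded, so it must be flushed first or the event could never signal. Rough timeline (assuming the scheduler advances the tick on submission, which is how MasterSemaphore::NextTick() is meant to be used):

    // record SetEvent at tick T      submit batch T            GPU executes batch T
    // ticks = T                      CurrentTick() -> T + 1    event becomes signalled
    // ticks >= CurrentTick()         ticks <  CurrentTick()
    // -> Wait() must Flush() first   -> Wait() just polls the event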
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 2e46c6278..696eaeb5f 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -93,7 +93,7 @@ VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
if (!descriptor_template) {
return {};
}
- const auto set = descriptor_allocator.Commit(scheduler.GetFence());
+ const VkDescriptorSet set = descriptor_allocator.Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
@@ -261,12 +261,12 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
vertex_input_ci.pNext = &input_divisor_ci;
}
- const auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, dynamic.Topology());
+ const auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, state.topology);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .topology = MaxwellToVK::PrimitiveTopology(device, dynamic.Topology()),
+ .topology = MaxwellToVK::PrimitiveTopology(device, state.topology),
.primitiveRestartEnable = state.primitive_restart_enable != 0 &&
SupportsPrimitiveRestart(input_assembly_topology),
};
@@ -400,7 +400,6 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
static constexpr std::array extended{
VK_DYNAMIC_STATE_CULL_MODE_EXT,
VK_DYNAMIC_STATE_FRONT_FACE_EXT,
- VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT,
VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.cpp b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
new file mode 100644
index 000000000..ae26e558d
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
@@ -0,0 +1,56 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <atomic>
+#include <chrono>
+
+#include "core/settings.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+using namespace std::chrono_literals;
+
+MasterSemaphore::MasterSemaphore(const VKDevice& device) {
+ static constexpr VkSemaphoreTypeCreateInfoKHR semaphore_type_ci{
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
+ .pNext = nullptr,
+ .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
+ .initialValue = 0,
+ };
+ static constexpr VkSemaphoreCreateInfo semaphore_ci{
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &semaphore_type_ci,
+ .flags = 0,
+ };
+ semaphore = device.GetLogical().CreateSemaphore(semaphore_ci);
+
+ if (!Settings::values.renderer_debug) {
+ return;
+ }
+ // Validation layers have a bug where they fail to track resource usage when using timeline
+ // semaphores and synchronizing with GetSemaphoreCounterValueKHR. To work around this issue, have
+ // a separate thread waiting for each timeline semaphore value.
+ debug_thread = std::thread([this] {
+ u64 counter = 0;
+ while (!shutdown) {
+ if (semaphore.Wait(counter, 10'000'000)) {
+ ++counter;
+ }
+ }
+ });
+}
+
+MasterSemaphore::~MasterSemaphore() {
+ shutdown = true;
+
+ // This thread might not be started
+ if (debug_thread.joinable()) {
+ debug_thread.join();
+ }
+}
+
+} // namespace Vulkan
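Note: the debug thread exists only to keep the validation layers' resource tracking alive; it repeatedly waits on successive counter values with a 10 ms timeout. The wrapper's semaphore.Wait(value, timeout) presumably forwards to the timeline-wait entry point, roughly as follows (hedged sketch in raw Vulkan, assuming VK_KHR_timeline_semaphore as enabled in vk_device.cpp below):

    // Wait for a single timeline value with a timeout; returns true if it was reached.
    bool WaitValue(VkDevice device, VkSemaphore semaphore, uint64_t value, uint64_t timeout_ns) {
        const VkSemaphoreWaitInfoKHR wait_info{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
            .pNext = nullptr,
            .flags = 0,
            .semaphoreCount = 1,
            .pSemaphores = &semaphore,
            .pValues = &value,
        };
        return vkWaitSemaphoresKHR(device, &wait_info, timeout_ns) == VK_SUCCESS;
    }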
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.h b/src/video_core/renderer_vulkan/vk_master_semaphore.h
new file mode 100644
index 000000000..0e93706d7
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.h
@@ -0,0 +1,70 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <thread>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+namespace Vulkan {
+
+class VKDevice;
+
+class MasterSemaphore {
+public:
+ explicit MasterSemaphore(const VKDevice& device);
+ ~MasterSemaphore();
+
+ /// Returns the current logical tick.
+ [[nodiscard]] u64 CurrentTick() const noexcept {
+ return current_tick;
+ }
+
+ /// Returns the timeline semaphore handle.
+ [[nodiscard]] VkSemaphore Handle() const noexcept {
+ return *semaphore;
+ }
+
+ /// Returns true when a tick has been hit by the GPU.
+ [[nodiscard]] bool IsFree(u64 tick) {
+ return gpu_tick >= tick;
+ }
+
+ /// Advance to the logical tick.
+ void NextTick() noexcept {
+ ++current_tick;
+ }
+
+ /// Refresh the known GPU tick
+ void Refresh() {
+ gpu_tick = semaphore.GetCounter();
+ }
+
+ /// Waits for a tick to be hit on the GPU
+ void Wait(u64 tick) {
+ // No need to wait if the GPU is ahead of the tick
+ if (IsFree(tick)) {
+ return;
+ }
+ // Update the GPU tick and try again
+ Refresh();
+ if (IsFree(tick)) {
+ return;
+ }
+ // If none of the above is hit, fallback to a regular wait
+ semaphore.Wait(tick);
+ }
+
+private:
+ vk::Semaphore semaphore; ///< Timeline semaphore.
+ std::atomic<u64> gpu_tick{0}; ///< Current known GPU tick.
+ std::atomic<u64> current_tick{1}; ///< Current logical tick.
+ std::atomic<bool> shutdown{false}; ///< True when the object is being destroyed.
+ std::thread debug_thread; ///< Debug thread to workaround validation layer bugs.
+};
+
+} // namespace Vulkan
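Note: MasterSemaphore is the heart of this change: a single VK_KHR_timeline_semaphore counter replaces the old per-submission fences, with CurrentTick()/NextTick() advancing on the CPU side and Refresh()/Wait() reading the GPU-signalled value back. The scheduler hunks are not part of this section, but a timeline-signalling submit looks roughly like the following (hedged sketch in raw Vulkan, not the exact VKScheduler code; the tick passed is assumed to be the value CurrentTick() returned for the batch being submitted):

    void SubmitWithTick(VkQueue queue, VkCommandBuffer cmdbuf, VkSemaphore timeline, uint64_t tick) {
        const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
            .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
            .pNext = nullptr,
            .waitSemaphoreValueCount = 0,
            .pWaitSemaphoreValues = nullptr,
            .signalSemaphoreValueCount = 1,
            .pSignalSemaphoreValues = &tick, // the tick this batch retires
        };
        const VkSubmitInfo submit_info{
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .pNext = &timeline_si,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = nullptr,
            .pWaitDstStageMask = nullptr,
            .commandBufferCount = 1,
            .pCommandBuffers = &cmdbuf,
            .signalSemaphoreCount = 1,
            .pSignalSemaphores = &timeline,
        };
        vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
    }

Refresh() then presumably maps onto vkGetSemaphoreCounterValueKHR, which supplies the value IsFree() and Wait() compare ticks against.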
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index cfdcdd6ab..dedc9c466 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -135,64 +135,56 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con
return std::memcmp(&rhs, this, sizeof *this) == 0;
}
-Shader::Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
- VideoCommon::Shader::ProgramCode program_code, u32 main_offset)
- : gpu_addr{gpu_addr}, program_code{std::move(program_code)},
- registry{stage, GetEngine(system, stage)}, shader_ir{this->program_code, main_offset,
- compiler_settings, registry},
- entries{GenerateShaderEntries(shader_ir)} {}
+Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine, Tegra::Engines::ShaderType stage,
+ GPUVAddr gpu_addr_, VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code_,
+ u32 main_offset)
+ : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage, engine),
+ shader_ir(program_code, main_offset, compiler_settings, registry),
+ entries(GenerateShaderEntries(shader_ir)) {}
Shader::~Shader() = default;
-Tegra::Engines::ConstBufferEngineInterface& Shader::GetEngine(Core::System& system,
- Tegra::Engines::ShaderType stage) {
- if (stage == ShaderType::Compute) {
- return system.GPU().KeplerCompute();
- } else {
- return system.GPU().Maxwell3D();
- }
-}
-
-VKPipelineCache::VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer,
- const VKDevice& device, VKScheduler& scheduler,
- VKDescriptorPool& descriptor_pool,
- VKUpdateDescriptorQueue& update_descriptor_queue,
- VKRenderPassCache& renderpass_cache)
- : VideoCommon::ShaderCache<Shader>{rasterizer}, system{system}, device{device},
- scheduler{scheduler}, descriptor_pool{descriptor_pool},
- update_descriptor_queue{update_descriptor_queue}, renderpass_cache{renderpass_cache} {}
+VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu_,
+ Tegra::Engines::Maxwell3D& maxwell3d_,
+ Tegra::Engines::KeplerCompute& kepler_compute_,
+ Tegra::MemoryManager& gpu_memory_, const VKDevice& device_,
+ VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ VKRenderPassCache& renderpass_cache_)
+ : VideoCommon::ShaderCache<Shader>{rasterizer}, gpu{gpu_}, maxwell3d{maxwell3d_},
+ kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
+ scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
+ update_descriptor_queue{update_descriptor_queue_}, renderpass_cache{renderpass_cache_} {}
VKPipelineCache::~VKPipelineCache() = default;
std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
- const auto& gpu = system.GPU().Maxwell3D();
-
std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
+
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto program{static_cast<Maxwell::ShaderProgram>(index)};
// Skip stages that are not enabled
- if (!gpu.regs.IsShaderConfigEnabled(index)) {
+ if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
continue;
}
- auto& memory_manager{system.GPU().MemoryManager()};
- const GPUVAddr program_addr{GetShaderAddress(system, program)};
- const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+ const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)};
+ const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
if (!result) {
- const auto host_ptr{memory_manager.GetPointer(program_addr)};
+ const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)};
// No shader found - create a new one
- constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
+ static constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
- ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
+ ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false);
const std::size_t size_in_bytes = code.size() * sizeof(u64);
- auto shader = std::make_unique<Shader>(system, stage, program_addr, std::move(code),
- stage_offset);
+ auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr,
+ std::move(code), stage_offset);
result = shader.get();
if (cpu_addr) {
@@ -215,11 +207,11 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
}
last_graphics_key = key;
- if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(system.GPU())) {
+ if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) {
std::unique_lock lock{pipeline_cache};
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
if (is_cache_miss) {
- system.GPU().ShaderNotify().MarkSharderBuilding();
+ gpu.ShaderNotify().MarkSharderBuilding();
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
const auto [program, bindings] = DecompileShaders(key.fixed_state);
async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool,
@@ -233,13 +225,13 @@ VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
auto& entry = pair->second;
if (is_cache_miss) {
- system.GPU().ShaderNotify().MarkSharderBuilding();
+ gpu.ShaderNotify().MarkSharderBuilding();
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
const auto [program, bindings] = DecompileShaders(key.fixed_state);
entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool,
update_descriptor_queue, renderpass_cache, key,
bindings, program);
- system.GPU().ShaderNotify().MarkShaderComplete();
+ gpu.ShaderNotify().MarkShaderComplete();
}
last_graphics_pipeline = entry.get();
return last_graphics_pipeline;
@@ -255,22 +247,21 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
}
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
- auto& memory_manager = system.GPU().MemoryManager();
- const auto program_addr = key.shader;
+ const GPUVAddr gpu_addr = key.shader;
- const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+ const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
if (!shader) {
// No shader found - create a new one
- const auto host_ptr = memory_manager.GetPointer(program_addr);
+ const auto host_ptr = gpu_memory.GetPointer(gpu_addr);
- ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
+ ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true);
const std::size_t size_in_bytes = code.size() * sizeof(u64);
- auto shader_info = std::make_unique<Shader>(system, ShaderType::Compute, program_addr,
- std::move(code), KERNEL_MAIN_OFFSET);
+ auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr,
+ *cpu_addr, std::move(code), KERNEL_MAIN_OFFSET);
shader = shader_info.get();
if (cpu_addr) {
@@ -298,7 +289,7 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
}
void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) {
- system.GPU().ShaderNotify().MarkShaderComplete();
+ gpu.ShaderNotify().MarkShaderComplete();
std::unique_lock lock{pipeline_cache};
graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline);
}
@@ -339,12 +330,8 @@ void VKPipelineCache::OnShaderRemoval(Shader* shader) {
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
- auto& memory_manager = system.GPU().MemoryManager();
- const auto& gpu = system.GPU().Maxwell3D();
-
Specialization specialization;
- if (fixed_state.dynamic_state.Topology() == Maxwell::PrimitiveTopology::Points ||
- device.IsExtExtendedDynamicStateSupported()) {
+ if (fixed_state.topology == Maxwell::PrimitiveTopology::Points) {
float point_size;
std::memcpy(&point_size, &fixed_state.point_size, sizeof(float));
specialization.point_size = point_size;
@@ -364,12 +351,12 @@ VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
// Skip stages that are not enabled
- if (!gpu.regs.IsShaderConfigEnabled(index)) {
+ if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
continue;
}
- const GPUVAddr gpu_addr = GetShaderAddress(system, program_enum);
- const std::optional<VAddr> cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
+ const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
+ const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index c04829e77..e558e6658 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -38,7 +38,6 @@ class RasterizerVulkan;
class VKComputePipeline;
class VKDescriptorPool;
class VKDevice;
-class VKFence;
class VKScheduler;
class VKUpdateDescriptorQueue;
@@ -85,7 +84,8 @@ namespace Vulkan {
class Shader {
public:
- explicit Shader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
+ explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine,
+ Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, VAddr cpu_addr,
VideoCommon::Shader::ProgramCode program_code, u32 main_offset);
~Shader();
@@ -97,22 +97,19 @@ public:
return shader_ir;
}
- const VideoCommon::Shader::Registry& GetRegistry() const {
- return registry;
- }
-
const VideoCommon::Shader::ShaderIR& GetIR() const {
return shader_ir;
}
+ const VideoCommon::Shader::Registry& GetRegistry() const {
+ return registry;
+ }
+
const ShaderEntries& GetEntries() const {
return entries;
}
private:
- static Tegra::Engines::ConstBufferEngineInterface& GetEngine(Core::System& system,
- Tegra::Engines::ShaderType stage);
-
GPUVAddr gpu_addr{};
VideoCommon::Shader::ProgramCode program_code;
VideoCommon::Shader::Registry registry;
@@ -122,9 +119,11 @@ private:
class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
public:
- explicit VKPipelineCache(Core::System& system, RasterizerVulkan& rasterizer,
- const VKDevice& device, VKScheduler& scheduler,
- VKDescriptorPool& descriptor_pool,
+ explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
+ Tegra::Engines::Maxwell3D& maxwell3d,
+ Tegra::Engines::KeplerCompute& kepler_compute,
+ Tegra::MemoryManager& gpu_memory, const VKDevice& device,
+ VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
VKUpdateDescriptorQueue& update_descriptor_queue,
VKRenderPassCache& renderpass_cache);
~VKPipelineCache() override;
@@ -145,7 +144,11 @@ private:
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
const FixedPipelineState& fixed_state);
- Core::System& system;
+ Tegra::GPU& gpu;
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::Engines::KeplerCompute& kepler_compute;
+ Tegra::MemoryManager& gpu_memory;
+
const VKDevice& device;
VKScheduler& scheduler;
VKDescriptorPool& descriptor_pool;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 6cd63d090..ee2d871e3 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -9,35 +9,33 @@
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
+using VideoCore::QueryType;
+
namespace {
constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};
-constexpr VkQueryType GetTarget(VideoCore::QueryType type) {
+constexpr VkQueryType GetTarget(QueryType type) {
return QUERY_TARGETS[static_cast<std::size_t>(type)];
}
} // Anonymous namespace
-QueryPool::QueryPool() : VKFencedPool{GROW_STEP} {}
+QueryPool::QueryPool(const VKDevice& device_, VKScheduler& scheduler, QueryType type_)
+ : ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {}
QueryPool::~QueryPool() = default;
-void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_) {
- device = &device_;
- type = type_;
-}
-
-std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
+std::pair<VkQueryPool, u32> QueryPool::Commit() {
std::size_t index;
do {
- index = CommitResource(fence);
+ index = CommitResource();
} while (usage[index]);
usage[index] = true;
@@ -47,7 +45,7 @@ std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
void QueryPool::Allocate(std::size_t begin, std::size_t end) {
usage.resize(end);
- pools.push_back(device->GetLogical().CreateQueryPool({
+ pools.push_back(device.GetLogical().CreateQueryPool({
.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@@ -68,30 +66,39 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
}
-VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
const VKDevice& device, VKScheduler& scheduler)
- : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
- QueryPool>{system, rasterizer},
- device{device}, scheduler{scheduler} {
- for (std::size_t i = 0; i < static_cast<std::size_t>(VideoCore::NumQueryTypes); ++i) {
- query_pools[i].Initialize(device, static_cast<VideoCore::QueryType>(i));
+ : VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream,
+ HostCounter>{rasterizer, maxwell3d, gpu_memory},
+ device{device}, scheduler{scheduler}, query_pools{
+ QueryPool{device, scheduler,
+ QueryType::SamplesPassed},
+ } {}
+
+VKQueryCache::~VKQueryCache() {
+ // TODO(Rodrigo): This is a hack to destroy all HostCounter instances before the base class
+ // destructor is called. The query cache should be redesigned to have a proper ownership model
+ // instead of using shared pointers.
+ for (size_t query_type = 0; query_type < VideoCore::NumQueryTypes; ++query_type) {
+ auto& stream = Stream(static_cast<QueryType>(query_type));
+ stream.Update(false);
+ stream.Reset();
}
}
-VKQueryCache::~VKQueryCache() = default;
-
-std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
- return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence());
+std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(QueryType type) {
+ return query_pools[static_cast<std::size_t>(type)].Commit();
}
-void VKQueryCache::Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query) {
+void VKQueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) {
query_pools[static_cast<std::size_t>(type)].Reserve(query);
}
HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> dependency,
- VideoCore::QueryType type)
+ QueryType type)
: VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
- type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} {
+ type{type}, query{cache.AllocateQuery(type)}, tick{cache.Scheduler().CurrentTick()} {
const vk::Device* logical = &cache.Device().GetLogical();
cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
logical->ResetQueryPoolEXT(query.first, query.second, 1);
@@ -109,7 +116,7 @@ void HostCounter::EndQuery() {
}
u64 HostCounter::BlockingQuery() const {
- if (ticks >= cache.Scheduler().Ticks()) {
+ if (tick >= cache.Scheduler().CurrentTick()) {
cache.Scheduler().Flush();
}
u64 data;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index 40119e6d3..2e57fb75d 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -11,7 +11,7 @@
#include "common/common_types.h"
#include "video_core/query_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace VideoCore {
@@ -28,14 +28,12 @@ class VKScheduler;
using CounterStream = VideoCommon::CounterStreamBase<VKQueryCache, HostCounter>;
-class QueryPool final : public VKFencedPool {
+class QueryPool final : public ResourcePool {
public:
- explicit QueryPool();
+ explicit QueryPool(const VKDevice& device, VKScheduler& scheduler, VideoCore::QueryType type);
~QueryPool() override;
- void Initialize(const VKDevice& device, VideoCore::QueryType type);
-
- std::pair<VkQueryPool, u32> Commit(VKFence& fence);
+ std::pair<VkQueryPool, u32> Commit();
void Reserve(std::pair<VkQueryPool, u32> query);
@@ -45,18 +43,18 @@ protected:
private:
static constexpr std::size_t GROW_STEP = 512;
- const VKDevice* device = nullptr;
- VideoCore::QueryType type = {};
+ const VKDevice& device;
+ const VideoCore::QueryType type;
std::vector<vk::QueryPool> pools;
std::vector<bool> usage;
};
class VKQueryCache final
- : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter,
- QueryPool> {
+ : public VideoCommon::QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter> {
public:
- explicit VKQueryCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ explicit VKQueryCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
const VKDevice& device, VKScheduler& scheduler);
~VKQueryCache();
@@ -75,6 +73,7 @@ public:
private:
const VKDevice& device;
VKScheduler& scheduler;
+ std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
};
class HostCounter final : public VideoCommon::HostCounterBase<VKQueryCache, HostCounter> {
@@ -91,7 +90,7 @@ private:
VKQueryCache& cache;
const VideoCore::QueryType type;
const std::pair<VkQueryPool, u32> query;
- const u64 ticks;
+ const u64 tick;
};
class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> {
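For reference, the lifetime of a query slot after this change, condensed from the HostCounter hunks above into a single flow (a sketch, not verbatim code; cache, query and tick stand for the members shown in vk_query_cache.cpp):

    // Construction: commit a slot from the occlusion pool; it is tied to the current tick.
    query = cache.AllocateQuery(QueryType::SamplesPassed);
    tick = cache.Scheduler().CurrentTick();

    // BlockingQuery(): only force a flush while the owning submission is still pending.
    if (tick >= cache.Scheduler().CurrentTick()) {
        cache.Scheduler().Flush();
    }

    // Destruction: hand the slot back so QueryPool::Commit can recycle it once its tick is free.
    cache.Reserve(QueryType::SamplesPassed, query);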
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 936f76195..e0fb8693f 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -31,7 +31,6 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@@ -381,28 +380,28 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
}
}
-RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& renderer,
- VKScreenInfo& screen_info, const VKDevice& device,
- VKResourceManager& resource_manager,
- VKMemoryManager& memory_manager, StateTracker& state_tracker,
- VKScheduler& scheduler)
- : RasterizerAccelerated{system.Memory()}, system{system}, render_window{renderer},
- screen_info{screen_info}, device{device}, resource_manager{resource_manager},
- memory_manager{memory_manager}, state_tracker{state_tracker}, scheduler{scheduler},
- staging_pool(device, memory_manager, scheduler), descriptor_pool(device),
- update_descriptor_queue(device, scheduler), renderpass_cache(device),
+RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_,
+ Tegra::MemoryManager& gpu_memory_,
+ Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_,
+ const VKDevice& device_, VKMemoryManager& memory_manager_,
+ StateTracker& state_tracker_, VKScheduler& scheduler_)
+ : RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_),
+ maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_),
+ device(device_), memory_manager(memory_manager_), state_tracker(state_tracker_),
+ scheduler(scheduler_), staging_pool(device, memory_manager, scheduler),
+ descriptor_pool(device, scheduler_), update_descriptor_queue(device, scheduler),
+ renderpass_cache(device),
quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
- texture_cache(system, *this, device, resource_manager, memory_manager, scheduler,
- staging_pool),
- pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue,
- renderpass_cache),
- buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
- sampler_cache(device),
- fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache),
- query_cache(system, *this, device, scheduler),
- wfi_event{device.GetLogical().CreateNewEvent()}, async_shaders{renderer} {
+ texture_cache(*this, maxwell3d, gpu_memory, device, memory_manager, scheduler, staging_pool),
+ pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
+ descriptor_pool, update_descriptor_queue, renderpass_cache),
+ buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool),
+ sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler),
+ fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device,
+ scheduler),
+ wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window) {
scheduler.SetQueryCache(query_cache);
if (device.UseAsynchronousShaders()) {
async_shaders.AllocateWorkers();
@@ -414,15 +413,13 @@ RasterizerVulkan::~RasterizerVulkan() = default;
void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
MICROPROFILE_SCOPE(Vulkan_Drawing);
+ SCOPE_EXIT({ gpu.TickWork(); });
FlushWork();
query_cache.UpdateCounters();
- SCOPE_EXIT({ system.GPU().TickWork(); });
-
- const auto& gpu = system.GPU().Maxwell3D();
GraphicsPipelineCacheKey key;
- key.fixed_state.Fill(gpu.regs, device.IsExtExtendedDynamicStateSupported());
+ key.fixed_state.Fill(maxwell3d.regs, device.IsExtExtendedDynamicStateSupported());
buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
@@ -480,8 +477,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
void RasterizerVulkan::Clear() {
MICROPROFILE_SCOPE(Vulkan_Clearing);
- const auto& gpu = system.GPU().Maxwell3D();
- if (!system.GPU().Maxwell3D().ShouldExecute()) {
+ if (!maxwell3d.ShouldExecute()) {
return;
}
@@ -490,7 +486,7 @@ void RasterizerVulkan::Clear() {
query_cache.UpdateCounters();
- const auto& regs = gpu.regs;
+ const auto& regs = maxwell3d.regs;
const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
regs.clear_buffers.A;
const bool use_depth = regs.clear_buffers.Z;
@@ -559,7 +555,7 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
query_cache.UpdateCounters();
- const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+ const auto& launch_desc = kepler_compute.launch_description;
auto& pipeline = pipeline_cache.GetComputePipeline({
.shader = code_addr,
.shared_memory_size = launch_desc.shared_alloc,
@@ -655,16 +651,14 @@ void RasterizerVulkan::SyncGuestHost() {
}
void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
- auto& gpu{system.GPU()};
if (!gpu.IsAsync()) {
- gpu.MemoryManager().Write<u32>(addr, value);
+ gpu_memory.Write<u32>(addr, value);
return;
}
fence_manager.SignalSemaphore(addr, value);
}
void RasterizerVulkan::SignalSyncPoint(u32 value) {
- auto& gpu{system.GPU()};
if (!gpu.IsAsync()) {
gpu.IncrementSyncPoint(value);
return;
@@ -673,7 +667,6 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) {
}
void RasterizerVulkan::ReleaseFences() {
- auto& gpu{system.GPU()};
if (!gpu.IsAsync()) {
return;
}
@@ -751,10 +744,6 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
return true;
}
-void RasterizerVulkan::SetupDirtyFlags() {
- state_tracker.Initialize();
-}
-
void RasterizerVulkan::FlushWork() {
static constexpr u32 DRAWS_TO_DISPATCH = 4096;
@@ -778,10 +767,9 @@ void RasterizerVulkan::FlushWork() {
RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments(bool is_clear) {
MICROPROFILE_SCOPE(Vulkan_RenderTargets);
- auto& maxwell3d = system.GPU().Maxwell3D();
- auto& dirty = maxwell3d.dirty.flags;
- auto& regs = maxwell3d.regs;
+ const auto& regs = maxwell3d.regs;
+ auto& dirty = maxwell3d.dirty.flags;
const bool update_rendertargets = dirty[VideoCommon::Dirty::RenderTargets];
dirty[VideoCommon::Dirty::RenderTargets] = false;
@@ -844,7 +832,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
return true;
};
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
for (std::size_t index = 0; index < num_attachments; ++index) {
if (try_push(color_attachments[index])) {
@@ -880,13 +868,12 @@ RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineSt
bool is_instanced) {
MICROPROFILE_SCOPE(Vulkan_Geometry);
- const auto& gpu = system.GPU().Maxwell3D();
- const auto& regs = gpu.regs;
+ const auto& regs = maxwell3d.regs;
SetupVertexArrays(buffer_bindings);
const u32 base_instance = regs.vb_base_instance;
- const u32 num_instances = is_instanced ? gpu.mme_draw.instance_count : 1;
+ const u32 num_instances = is_instanced ? maxwell3d.mme_draw.instance_count : 1;
const u32 base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first;
const u32 num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count;
@@ -947,7 +934,7 @@ void RasterizerVulkan::SetupImageTransitions(
}
void RasterizerVulkan::UpdateDynamicStates() {
- auto& regs = system.GPU().Maxwell3D().regs;
+ auto& regs = maxwell3d.regs;
UpdateViewportsState(regs);
UpdateScissorsState(regs);
UpdateDepthBias(regs);
@@ -961,14 +948,13 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateDepthWriteEnable(regs);
UpdateDepthCompareOp(regs);
UpdateFrontFace(regs);
- UpdatePrimitiveTopology(regs);
UpdateStencilOp(regs);
UpdateStencilTestEnable(regs);
}
}
void RasterizerVulkan::BeginTransformFeedback() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
if (regs.tfb_enabled == 0) {
return;
}
@@ -1000,7 +986,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
}
void RasterizerVulkan::EndTransformFeedback() {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
if (regs.tfb_enabled == 0) {
return;
}
@@ -1013,7 +999,7 @@ void RasterizerVulkan::EndTransformFeedback() {
}
void RasterizerVulkan::SetupVertexArrays(BufferBindings& buffer_bindings) {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const auto& vertex_array = regs.vertex_array[index];
@@ -1039,7 +1025,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
if (params.num_vertices == 0) {
return;
}
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
switch (regs.draw.topology) {
case Maxwell::PrimitiveTopology::Quads: {
if (!params.is_indexed) {
@@ -1087,8 +1073,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_ConstBuffers);
- const auto& gpu = system.GPU().Maxwell3D();
- const auto& shader_stage = gpu.state.shader_stages[stage];
+ const auto& shader_stage = maxwell3d.state.shader_stages[stage];
for (const auto& entry : entries.const_buffers) {
SetupConstBuffer(entry, shader_stage.const_buffers[entry.GetIndex()]);
}
@@ -1096,8 +1081,7 @@ void RasterizerVulkan::SetupGraphicsConstBuffers(const ShaderEntries& entries, s
void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_GlobalBuffers);
- auto& gpu{system.GPU()};
- const auto cbufs{gpu.Maxwell3D().state.shader_stages[stage]};
+ const auto& cbufs{maxwell3d.state.shader_stages[stage]};
for (const auto& entry : entries.global_buffers) {
const auto addr = cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset();
@@ -1107,19 +1091,17 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries,
void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.uniform_texels) {
- const auto image = GetTextureInfo(gpu, entry, stage).tic;
+ const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
SetupUniformTexels(image, entry);
}
}
void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.samplers) {
for (std::size_t i = 0; i < entry.size; ++i) {
- const auto texture = GetTextureInfo(gpu, entry, stage, i);
+ const auto texture = GetTextureInfo(maxwell3d, entry, stage, i);
SetupTexture(texture, entry);
}
}
@@ -1127,25 +1109,23 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.storage_texels) {
- const auto image = GetTextureInfo(gpu, entry, stage).tic;
+ const auto image = GetTextureInfo(maxwell3d, entry, stage).tic;
SetupStorageTexel(image, entry);
}
}
void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
MICROPROFILE_SCOPE(Vulkan_Images);
- const auto& gpu = system.GPU().Maxwell3D();
for (const auto& entry : entries.images) {
- const auto tic = GetTextureInfo(gpu, entry, stage).tic;
+ const auto tic = GetTextureInfo(maxwell3d, entry, stage).tic;
SetupImage(tic, entry);
}
}
void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_ConstBuffers);
- const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
+ const auto& launch_desc = kepler_compute.launch_description;
for (const auto& entry : entries.const_buffers) {
const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
@@ -1159,7 +1139,7 @@ void RasterizerVulkan::SetupComputeConstBuffers(const ShaderEntries& entries) {
void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_GlobalBuffers);
- const auto cbufs{system.GPU().KeplerCompute().launch_description.const_buffer_config};
+ const auto& cbufs{kepler_compute.launch_description.const_buffer_config};
for (const auto& entry : entries.global_buffers) {
const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
SetupGlobalBuffer(entry, addr);
@@ -1168,19 +1148,17 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.uniform_texels) {
- const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+ const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
SetupUniformTexels(image, entry);
}
}
void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.samplers) {
for (std::size_t i = 0; i < entry.size; ++i) {
- const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
+ const auto texture = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex, i);
SetupTexture(texture, entry);
}
}
@@ -1188,18 +1166,16 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Textures);
- const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.storage_texels) {
- const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+ const auto image = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
SetupStorageTexel(image, entry);
}
}
void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
MICROPROFILE_SCOPE(Vulkan_Images);
- const auto& gpu = system.GPU().KeplerCompute();
for (const auto& entry : entries.images) {
- const auto tic = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+ const auto tic = GetTextureInfo(kepler_compute, entry, ComputeShaderIndex).tic;
SetupImage(tic, entry);
}
}
@@ -1223,9 +1199,8 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
}
void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address) {
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto actual_addr = memory_manager.Read<u64>(address);
- const auto size = memory_manager.Read<u32>(address + 8);
+ const u64 actual_addr = gpu_memory.Read<u64>(address);
+ const u32 size = gpu_memory.Read<u32>(address + 8);
if (size == 0) {
// Sometimes global memory pointers don't have a proper size. Upload a dummy entry
@@ -1442,16 +1417,6 @@ void RasterizerVulkan::UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs) {
[front_face](vk::CommandBuffer cmdbuf) { cmdbuf.SetFrontFaceEXT(front_face); });
}
-void RasterizerVulkan::UpdatePrimitiveTopology(Tegra::Engines::Maxwell3D::Regs& regs) {
- if (!state_tracker.TouchPrimitiveTopology()) {
- return;
- }
- const Maxwell::PrimitiveTopology primitive_topology = regs.draw.topology.Value();
- scheduler.Record([this, primitive_topology](vk::CommandBuffer cmdbuf) {
- cmdbuf.SetPrimitiveTopologyEXT(MaxwellToVK::PrimitiveTopology(device, primitive_topology));
- });
-}
-
void RasterizerVulkan::UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchStencilOp()) {
return;
@@ -1508,7 +1473,7 @@ std::size_t RasterizerVulkan::CalculateComputeStreamBufferSize() const {
}
std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
std::size_t size = 0;
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
@@ -1523,9 +1488,8 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
}
std::size_t RasterizerVulkan::CalculateIndexBufferSize() const {
- const auto& regs = system.GPU().Maxwell3D().regs;
- return static_cast<std::size_t>(regs.index_array.count) *
- static_cast<std::size_t>(regs.index_array.FormatSizeInBytes());
+ return static_cast<std::size_t>(maxwell3d.regs.index_array.count) *
+ static_cast<std::size_t>(maxwell3d.regs.index_array.FormatSizeInBytes());
}
std::size_t RasterizerVulkan::CalculateConstBufferSize(
@@ -1540,7 +1504,7 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
}
RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
- const auto& regs = system.GPU().Maxwell3D().regs;
+ const auto& regs = maxwell3d.regs;
const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
RenderPassParams params;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index f640ba649..237e51fa4 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -25,7 +25,6 @@
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@@ -106,10 +105,11 @@ struct ImageView {
class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
public:
- explicit RasterizerVulkan(Core::System& system, Core::Frontend::EmuWindow& render_window,
+ explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu,
+ Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
VKScreenInfo& screen_info, const VKDevice& device,
- VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
- StateTracker& state_tracker, VKScheduler& scheduler);
+ VKMemoryManager& memory_manager, StateTracker& state_tracker,
+ VKScheduler& scheduler);
~RasterizerVulkan() override;
void Draw(bool is_indexed, bool is_instanced) override;
@@ -135,7 +135,6 @@ public:
const Tegra::Engines::Fermi2D::Config& copy_config) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
- void SetupDirtyFlags() override;
VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
return async_shaders;
@@ -260,7 +259,6 @@ private:
void UpdateDepthWriteEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthCompareOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs);
- void UpdatePrimitiveTopology(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
@@ -279,11 +277,13 @@ private:
VkBuffer DefaultBuffer();
- Core::System& system;
- Core::Frontend::EmuWindow& render_window;
+ Tegra::GPU& gpu;
+ Tegra::MemoryManager& gpu_memory;
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::Engines::KeplerCompute& kepler_compute;
+
VKScreenInfo& screen_info;
const VKDevice& device;
- VKResourceManager& resource_manager;
VKMemoryManager& memory_manager;
StateTracker& state_tracker;
VKScheduler& scheduler;
@@ -300,8 +300,8 @@ private:
VKPipelineCache pipeline_cache;
VKBufferCache buffer_cache;
VKSamplerCache sampler_cache;
- VKFenceManager fence_manager;
VKQueryCache query_cache;
+ VKFenceManager fence_manager;
vk::Buffer default_buffer;
VKMemoryCommit default_buffer_commit;
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
deleted file mode 100644
index f19330a36..000000000
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <optional>
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "video_core/renderer_vulkan/vk_device.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
-#include "video_core/renderer_vulkan/wrapper.h"
-
-namespace Vulkan {
-
-namespace {
-
-// TODO(Rodrigo): Fine tune these numbers.
-constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
-constexpr std::size_t FENCES_GROW_STEP = 0x40;
-
-constexpr VkFenceCreateInfo BuildFenceCreateInfo() {
- return {
- .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- };
-}
-
-} // Anonymous namespace
-
-class CommandBufferPool final : public VKFencedPool {
-public:
- explicit CommandBufferPool(const VKDevice& device)
- : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
-
- void Allocate(std::size_t begin, std::size_t end) override {
- // Command buffers are going to be commited, recorded, executed every single usage cycle.
- // They are also going to be reseted when commited.
- Pool& pool = pools.emplace_back();
- pool.handle = device.GetLogical().CreateCommandPool({
- .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
- .pNext = nullptr,
- .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
- VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
- .queueFamilyIndex = device.GetGraphicsFamily(),
- });
- pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
- }
-
- VkCommandBuffer Commit(VKFence& fence) {
- const std::size_t index = CommitResource(fence);
- const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
- const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
- return pools[pool_index].cmdbufs[sub_index];
- }
-
-private:
- struct Pool {
- vk::CommandPool handle;
- vk::CommandBuffers cmdbufs;
- };
-
- const VKDevice& device;
- std::vector<Pool> pools;
-};
-
-VKResource::VKResource() = default;
-
-VKResource::~VKResource() = default;
-
-VKFence::VKFence(const VKDevice& device)
- : device{device}, handle{device.GetLogical().CreateFence(BuildFenceCreateInfo())} {}
-
-VKFence::~VKFence() = default;
-
-void VKFence::Wait() {
- switch (const VkResult result = handle.Wait()) {
- case VK_SUCCESS:
- return;
- case VK_ERROR_DEVICE_LOST:
- device.ReportLoss();
- [[fallthrough]];
- default:
- throw vk::Exception(result);
- }
-}
-
-void VKFence::Release() {
- ASSERT(is_owned);
- is_owned = false;
-}
-
-void VKFence::Commit() {
- is_owned = true;
- is_used = true;
-}
-
-bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
- if (!is_used) {
- // If a fence is not used it's always free.
- return true;
- }
- if (is_owned && !owner_wait) {
- // The fence is still being owned (Release has not been called) and ownership wait has
- // not been asked.
- return false;
- }
-
- if (gpu_wait) {
- // Wait for the fence if it has been requested.
- (void)handle.Wait();
- } else {
- if (handle.GetStatus() != VK_SUCCESS) {
- // Vulkan fence is not ready, not much it can do here
- return false;
- }
- }
-
- // Broadcast resources their free state.
- for (auto* resource : protected_resources) {
- resource->OnFenceRemoval(this);
- }
- protected_resources.clear();
-
- // Prepare fence for reusage.
- handle.Reset();
- is_used = false;
- return true;
-}
-
-void VKFence::Protect(VKResource* resource) {
- protected_resources.push_back(resource);
-}
-
-void VKFence::Unprotect(VKResource* resource) {
- const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
- ASSERT(it != protected_resources.end());
-
- resource->OnFenceRemoval(this);
- protected_resources.erase(it);
-}
-
-void VKFence::RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept {
- std::replace(std::begin(protected_resources), std::end(protected_resources), old_resource,
- new_resource);
-}
-
-VKFenceWatch::VKFenceWatch() = default;
-
-VKFenceWatch::VKFenceWatch(VKFence& initial_fence) {
- Watch(initial_fence);
-}
-
-VKFenceWatch::VKFenceWatch(VKFenceWatch&& rhs) noexcept {
- fence = std::exchange(rhs.fence, nullptr);
- if (fence) {
- fence->RedirectProtection(&rhs, this);
- }
-}
-
-VKFenceWatch& VKFenceWatch::operator=(VKFenceWatch&& rhs) noexcept {
- fence = std::exchange(rhs.fence, nullptr);
- if (fence) {
- fence->RedirectProtection(&rhs, this);
- }
- return *this;
-}
-
-VKFenceWatch::~VKFenceWatch() {
- if (fence) {
- fence->Unprotect(this);
- }
-}
-
-void VKFenceWatch::Wait() {
- if (fence == nullptr) {
- return;
- }
- fence->Wait();
- fence->Unprotect(this);
-}
-
-void VKFenceWatch::Watch(VKFence& new_fence) {
- Wait();
- fence = &new_fence;
- fence->Protect(this);
-}
-
-bool VKFenceWatch::TryWatch(VKFence& new_fence) {
- if (fence) {
- return false;
- }
- fence = &new_fence;
- fence->Protect(this);
- return true;
-}
-
-void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
- ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
- fence = nullptr;
-}
-
-VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
-
-VKFencedPool::~VKFencedPool() = default;
-
-std::size_t VKFencedPool::CommitResource(VKFence& fence) {
- const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
- for (std::size_t iterator = begin; iterator < end; ++iterator) {
- if (watches[iterator]->TryWatch(fence)) {
- // The resource is now being watched, a free resource was successfully found.
- return iterator;
- }
- }
- return {};
- };
- // Try to find a free resource from the hinted position to the end.
- auto found = Search(free_iterator, watches.size());
- if (!found) {
- // Search from beginning to the hinted position.
- found = Search(0, free_iterator);
- if (!found) {
- // Both searches failed, the pool is full; handle it.
- const std::size_t free_resource = ManageOverflow();
-
- // Watch will wait for the resource to be free.
- watches[free_resource]->Watch(fence);
- found = free_resource;
- }
- }
- // Free iterator is hinted to the resource after the one that's been commited.
- free_iterator = (*found + 1) % watches.size();
- return *found;
-}
-
-std::size_t VKFencedPool::ManageOverflow() {
- const std::size_t old_capacity = watches.size();
- Grow();
-
- // The last entry is guaranted to be free, since it's the first element of the freshly
- // allocated resources.
- return old_capacity;
-}
-
-void VKFencedPool::Grow() {
- const std::size_t old_capacity = watches.size();
- watches.resize(old_capacity + grow_step);
- std::generate(watches.begin() + old_capacity, watches.end(),
- []() { return std::make_unique<VKFenceWatch>(); });
- Allocate(old_capacity, old_capacity + grow_step);
-}
-
-VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
- GrowFences(FENCES_GROW_STEP);
- command_buffer_pool = std::make_unique<CommandBufferPool>(device);
-}
-
-VKResourceManager::~VKResourceManager() = default;
-
-VKFence& VKResourceManager::CommitFence() {
- const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
- const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
- const auto hinted = fences.begin() + fences_iterator;
-
- auto it = std::find_if(hinted, fences.end(), Tick);
- if (it == fences.end()) {
- it = std::find_if(fences.begin(), hinted, Tick);
- if (it == hinted) {
- return nullptr;
- }
- }
- fences_iterator = std::distance(fences.begin(), it) + 1;
- if (fences_iterator >= fences.size())
- fences_iterator = 0;
-
- auto& fence = *it;
- fence->Commit();
- return fence.get();
- };
-
- VKFence* found_fence = StepFences(false, false);
- if (!found_fence) {
- // Try again, this time waiting.
- found_fence = StepFences(true, false);
-
- if (!found_fence) {
- // Allocate new fences and try again.
- LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
- fences.size() + FENCES_GROW_STEP);
-
- GrowFences(FENCES_GROW_STEP);
- found_fence = StepFences(true, false);
- ASSERT(found_fence != nullptr);
- }
- }
- return *found_fence;
-}
-
-VkCommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
- return command_buffer_pool->Commit(fence);
-}
-
-void VKResourceManager::GrowFences(std::size_t new_fences_count) {
- const std::size_t previous_size = fences.size();
- fences.resize(previous_size + new_fences_count);
-
- std::generate(fences.begin() + previous_size, fences.end(),
- [this] { return std::make_unique<VKFence>(device); });
-}
-
-} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
deleted file mode 100644
index f683d2276..000000000
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <cstddef>
-#include <memory>
-#include <vector>
-#include "video_core/renderer_vulkan/wrapper.h"
-
-namespace Vulkan {
-
-class VKDevice;
-class VKFence;
-class VKResourceManager;
-
-class CommandBufferPool;
-
-/// Interface for a Vulkan resource
-class VKResource {
-public:
- explicit VKResource();
- virtual ~VKResource();
-
- /**
- * Signals the object that an owning fence has been signaled.
- * @param signaling_fence Fence that signals its usage end.
- */
- virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
-};
-
-/**
- * Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access.
- * They must be commited from the resource manager. Their usage flow is: commit the fence from the
- * resource manager, protect resources with it and use them, send the fence to an execution queue
- * and Wait for it if needed and then call Release. Used resources will automatically be signaled
- * when they are free to be reused.
- * @brief Protects resources for concurrent usage and signals its release.
- */
-class VKFence {
- friend class VKResourceManager;
-
-public:
- explicit VKFence(const VKDevice& device);
- ~VKFence();
-
- /**
- * Waits for the fence to be signaled.
- * @warning You must have ownership of the fence and it has to be previously sent to a queue to
- * call this function.
- */
- void Wait();
-
- /**
- * Releases ownership of the fence. Pass after it has been sent to an execution queue.
- * Unmanaged usage of the fence after the call will result in undefined behavior because it may
- * be being used for something else.
- */
- void Release();
-
- /// Protects a resource with this fence.
- void Protect(VKResource* resource);
-
- /// Removes protection for a resource.
- void Unprotect(VKResource* resource);
-
- /// Redirects one protected resource to a new address.
- void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
-
- /// Retreives the fence.
- operator VkFence() const {
- return *handle;
- }
-
-private:
- /// Take ownership of the fence.
- void Commit();
-
- /**
- * Updates the fence status.
- * @warning Waiting for the owner might soft lock the execution.
- * @param gpu_wait Wait for the fence to be signaled by the driver.
- * @param owner_wait Wait for the owner to signal its freedom.
- * @returns True if the fence is free. Waiting for gpu and owner will always return true.
- */
- bool Tick(bool gpu_wait, bool owner_wait);
-
- const VKDevice& device; ///< Device handler
- vk::Fence handle; ///< Vulkan fence
- std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
- bool is_owned = false; ///< The fence has been commited but not released yet.
- bool is_used = false; ///< The fence has been commited but it has not been checked to be free.
-};
-
-/**
- * A fence watch is used to keep track of the usage of a fence and protect a resource or set of
- * resources without having to inherit VKResource from their handlers.
- */
-class VKFenceWatch final : public VKResource {
-public:
- explicit VKFenceWatch();
- VKFenceWatch(VKFence& initial_fence);
- VKFenceWatch(VKFenceWatch&&) noexcept;
- VKFenceWatch(const VKFenceWatch&) = delete;
- ~VKFenceWatch() override;
-
- VKFenceWatch& operator=(VKFenceWatch&&) noexcept;
-
- /// Waits for the fence to be released.
- void Wait();
-
- /**
- * Waits for a previous fence and watches a new one.
- * @param new_fence New fence to wait to.
- */
- void Watch(VKFence& new_fence);
-
- /**
- * Checks if it's currently being watched and starts watching it if it's available.
- * @returns True if a watch has started, false if it's being watched.
- */
- bool TryWatch(VKFence& new_fence);
-
- void OnFenceRemoval(VKFence* signaling_fence) override;
-
- /**
- * Do not use it paired with Watch. Use TryWatch instead.
- * Returns true when the watch is free.
- */
- bool IsUsed() const {
- return fence != nullptr;
- }
-
-private:
- VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
-};
-
-/**
- * Handles a pool of resources protected by fences. Manages resource overflow allocating more
- * resources.
- */
-class VKFencedPool {
-public:
- explicit VKFencedPool(std::size_t grow_step);
- virtual ~VKFencedPool();
-
-protected:
- /**
- * Commits a free resource and protects it with a fence. It may allocate new resources.
- * @param fence Fence that protects the commited resource.
- * @returns Index of the resource commited.
- */
- std::size_t CommitResource(VKFence& fence);
-
- /// Called when a chunk of resources have to be allocated.
- virtual void Allocate(std::size_t begin, std::size_t end) = 0;
-
-private:
- /// Manages pool overflow allocating new resources.
- std::size_t ManageOverflow();
-
- /// Allocates a new page of resources.
- void Grow();
-
- std::size_t grow_step = 0; ///< Number of new resources created after an overflow
- std::size_t free_iterator = 0; ///< Hint to where the next free resources is likely to be found
- std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
-};
-
-/**
- * The resource manager handles all resources that can be protected with a fence avoiding
- * driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
- */
-class VKResourceManager final {
-public:
- explicit VKResourceManager(const VKDevice& device);
- ~VKResourceManager();
-
- /// Commits a fence. It has to be sent to a queue and released.
- VKFence& CommitFence();
-
- /// Commits an unused command buffer and protects it with a fence.
- VkCommandBuffer CommitCommandBuffer(VKFence& fence);
-
-private:
- /// Allocates new fences.
- void GrowFences(std::size_t new_fences_count);
-
- const VKDevice& device; ///< Device handler.
- std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
- std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences.
- std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
-};
-
-} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_pool.cpp b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
new file mode 100644
index 000000000..ee274ac59
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
@@ -0,0 +1,63 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <optional>
+
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
+#include "video_core/renderer_vulkan/vk_resource_pool.h"
+
+namespace Vulkan {
+
+ResourcePool::ResourcePool(MasterSemaphore& master_semaphore_, size_t grow_step_)
+ : master_semaphore{master_semaphore_}, grow_step{grow_step_} {}
+
+ResourcePool::~ResourcePool() = default;
+
+size_t ResourcePool::CommitResource() {
+ // Refresh semaphore to query updated results
+ master_semaphore.Refresh();
+
+ const auto search = [this](size_t begin, size_t end) -> std::optional<size_t> {
+ for (size_t iterator = begin; iterator < end; ++iterator) {
+ if (master_semaphore.IsFree(ticks[iterator])) {
+ ticks[iterator] = master_semaphore.CurrentTick();
+ return iterator;
+ }
+ }
+ return {};
+ };
+ // Try to find a free resource from the hinted position to the end.
+ auto found = search(free_iterator, ticks.size());
+ if (!found) {
+ // Search from beginning to the hinted position.
+ found = search(0, free_iterator);
+ if (!found) {
+ // Both searches failed, the pool is full; handle it.
+ const size_t free_resource = ManageOverflow();
+
+ ticks[free_resource] = master_semaphore.CurrentTick();
+ found = free_resource;
+ }
+ }
+    // Free iterator is hinted to the resource after the one that's been committed.
+ free_iterator = (*found + 1) % ticks.size();
+ return *found;
+}
+
+size_t ResourcePool::ManageOverflow() {
+ const size_t old_capacity = ticks.size();
+ Grow();
+
+    // The last entry is guaranteed to be free, since it's the first element of the freshly
+ // allocated resources.
+ return old_capacity;
+}
+
+void ResourcePool::Grow() {
+ const size_t old_capacity = ticks.size();
+ ticks.resize(old_capacity + grow_step);
+ Allocate(old_capacity, old_capacity + grow_step);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_pool.h b/src/video_core/renderer_vulkan/vk_resource_pool.h
new file mode 100644
index 000000000..a018c7ec2
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_pool.h
@@ -0,0 +1,43 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Vulkan {
+
+class MasterSemaphore;
+
+/**
+ * Handles a pool of resources protected by the scheduler's master semaphore. Manages resource
+ * overflow by allocating more resources.
+ */
+class ResourcePool {
+public:
+ explicit ResourcePool(MasterSemaphore& master_semaphore, size_t grow_step);
+ virtual ~ResourcePool();
+
+protected:
+ size_t CommitResource();
+
+    /// Called when a chunk of resources has to be allocated.
+ virtual void Allocate(size_t begin, size_t end) = 0;
+
+private:
+ /// Manages pool overflow allocating new resources.
+ size_t ManageOverflow();
+
+ /// Allocates a new page of resources.
+ void Grow();
+
+ MasterSemaphore& master_semaphore;
+ size_t grow_step = 0; ///< Number of new resources created after an overflow
+    size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
+ std::vector<u64> ticks; ///< Ticks for each resource
+};
+
+} // namespace Vulkan
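ResourcePool replaces the old VKFencedPool: each slot stores the master-semaphore tick at which it was last handed out, and CommitResource() returns the first slot whose tick the semaphore already reports as free, growing the pool by grow_step on overflow. A minimal subclass sketch in the spirit of QueryPool and the new CommandPool (the resource type and its factory are placeholders, not part of this change):

    class ExamplePool final : public ResourcePool {
    public:
        explicit ExamplePool(MasterSemaphore& semaphore)
            : ResourcePool(semaphore, GROW_STEP) {}

        ExampleResource& Commit() {
            // Index whose previous user the GPU has already finished with.
            return resources[CommitResource()];
        }

    protected:
        void Allocate(size_t begin, size_t end) override {
            // Invoked from Grow(); create slots for the freshly added range.
            resources.resize(end);
            for (size_t i = begin; i < end; ++i) {
                resources[i] = CreateExampleResource(); // placeholder factory
            }
        }

    private:
        static constexpr size_t GROW_STEP = 64;
        std::vector<ExampleResource> resources;
    };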
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index dbbd0961a..1a483dc71 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -10,9 +10,10 @@
#include "common/microprofile.h"
#include "common/thread.h"
+#include "video_core/renderer_vulkan/vk_command_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/wrapper.h"
@@ -35,10 +36,10 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
last = nullptr;
}
-VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager,
- StateTracker& state_tracker)
- : device{device}, resource_manager{resource_manager}, state_tracker{state_tracker},
- next_fence{&resource_manager.CommitFence()} {
+VKScheduler::VKScheduler(const VKDevice& device_, StateTracker& state_tracker_)
+ : device{device_}, state_tracker{state_tracker_},
+ master_semaphore{std::make_unique<MasterSemaphore>(device)},
+ command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
AcquireNewChunk();
AllocateNewContext();
worker_thread = std::thread(&VKScheduler::WorkerThread, this);
@@ -50,20 +51,27 @@ VKScheduler::~VKScheduler() {
worker_thread.join();
}
-void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
+u64 VKScheduler::CurrentTick() const noexcept {
+ return master_semaphore->CurrentTick();
+}
+
+bool VKScheduler::IsFree(u64 tick) const noexcept {
+ return master_semaphore->IsFree(tick);
+}
+
+void VKScheduler::Wait(u64 tick) {
+ master_semaphore->Wait(tick);
+}
+
+void VKScheduler::Flush(VkSemaphore semaphore) {
SubmitExecution(semaphore);
- if (release_fence) {
- current_fence->Release();
- }
AllocateNewContext();
}
-void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) {
+void VKScheduler::Finish(VkSemaphore semaphore) {
+ const u64 presubmit_tick = CurrentTick();
SubmitExecution(semaphore);
- current_fence->Wait();
- if (release_fence) {
- current_fence->Release();
- }
+ Wait(presubmit_tick);
AllocateNewContext();
}
@@ -160,18 +168,38 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
current_cmdbuf.End();
+ const VkSemaphore timeline_semaphore = master_semaphore->Handle();
+ const u32 num_signal_semaphores = semaphore ? 2U : 1U;
+
+ const u64 signal_value = master_semaphore->CurrentTick();
+ const u64 wait_value = signal_value - 1;
+ const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+
+ master_semaphore->NextTick();
+
+ const std::array signal_values{signal_value, u64(0)};
+ const std::array signal_semaphores{timeline_semaphore, semaphore};
+
+ const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
+ .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
+ .pNext = nullptr,
+ .waitSemaphoreValueCount = 1,
+ .pWaitSemaphoreValues = &wait_value,
+ .signalSemaphoreValueCount = num_signal_semaphores,
+ .pSignalSemaphoreValues = signal_values.data(),
+ };
const VkSubmitInfo submit_info{
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
- .pNext = nullptr,
- .waitSemaphoreCount = 0,
- .pWaitSemaphores = nullptr,
- .pWaitDstStageMask = nullptr,
+ .pNext = &timeline_si,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &timeline_semaphore,
+ .pWaitDstStageMask = &wait_stage_mask,
.commandBufferCount = 1,
.pCommandBuffers = current_cmdbuf.address(),
- .signalSemaphoreCount = semaphore ? 1U : 0U,
- .pSignalSemaphores = &semaphore,
+ .signalSemaphoreCount = num_signal_semaphores,
+ .pSignalSemaphores = signal_semaphores.data(),
};
- switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
+ switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
case VK_SUCCESS:
break;
case VK_ERROR_DEVICE_LOST:
@@ -183,14 +211,9 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
}
void VKScheduler::AllocateNewContext() {
- ++ticks;
-
std::unique_lock lock{mutex};
- current_fence = next_fence;
- next_fence = &resource_manager.CommitFence();
- current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
- device.GetDispatchLoader());
+ current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
current_cmdbuf.Begin({
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 970a65566..7be8a19f0 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -16,42 +16,33 @@
namespace Vulkan {
+class CommandPool;
+class MasterSemaphore;
class StateTracker;
class VKDevice;
-class VKFence;
class VKQueryCache;
-class VKResourceManager;
-
-class VKFenceView {
-public:
- VKFenceView() = default;
- VKFenceView(VKFence* const& fence) : fence{fence} {}
-
- VKFence* operator->() const noexcept {
- return fence;
- }
-
- operator VKFence&() const noexcept {
- return *fence;
- }
-
-private:
- VKFence* const& fence;
-};
/// The scheduler abstracts command buffer and fence management with an interface that's able to do
/// OpenGL-like operations on Vulkan command buffers.
class VKScheduler {
public:
- explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager,
- StateTracker& state_tracker);
+ explicit VKScheduler(const VKDevice& device, StateTracker& state_tracker);
~VKScheduler();
+ /// Returns the current command buffer tick.
+ [[nodiscard]] u64 CurrentTick() const noexcept;
+
+ /// Returns true when a tick has been triggered by the GPU.
+ [[nodiscard]] bool IsFree(u64 tick) const noexcept;
+
+ /// Waits for the given tick to trigger on the GPU.
+ void Wait(u64 tick);
+
/// Sends the current execution context to the GPU.
- void Flush(bool release_fence = true, VkSemaphore semaphore = nullptr);
+ void Flush(VkSemaphore semaphore = nullptr);
/// Sends the current execution context to the GPU and waits for it to complete.
- void Finish(bool release_fence = true, VkSemaphore semaphore = nullptr);
+ void Finish(VkSemaphore semaphore = nullptr);
/// Waits for the worker thread to finish executing everything. After this function returns it's
/// safe to touch worker resources.
@@ -86,14 +77,9 @@ public:
(void)chunk->Record(command);
}
- /// Gets a reference to the current fence.
- VKFenceView GetFence() const {
- return current_fence;
- }
-
- /// Returns the current command buffer tick.
- u64 Ticks() const {
- return ticks;
+ /// Returns the master timeline semaphore.
+ [[nodiscard]] MasterSemaphore& GetMasterSemaphore() const noexcept {
+ return *master_semaphore;
}
private:
@@ -171,6 +157,13 @@ private:
std::array<u8, 0x8000> data{};
};
+ struct State {
+ VkRenderPass renderpass = nullptr;
+ VkFramebuffer framebuffer = nullptr;
+ VkExtent2D render_area = {0, 0};
+ VkPipeline graphics_pipeline = nullptr;
+ };
+
void WorkerThread();
void SubmitExecution(VkSemaphore semaphore);
@@ -186,30 +179,23 @@ private:
void AcquireNewChunk();
const VKDevice& device;
- VKResourceManager& resource_manager;
StateTracker& state_tracker;
+ std::unique_ptr<MasterSemaphore> master_semaphore;
+ std::unique_ptr<CommandPool> command_pool;
+
VKQueryCache* query_cache = nullptr;
vk::CommandBuffer current_cmdbuf;
- VKFence* current_fence = nullptr;
- VKFence* next_fence = nullptr;
-
- struct State {
- VkRenderPass renderpass = nullptr;
- VkFramebuffer framebuffer = nullptr;
- VkExtent2D render_area = {0, 0};
- VkPipeline graphics_pipeline = nullptr;
- } state;
std::unique_ptr<CommandChunk> chunk;
std::thread worker_thread;
+ State state;
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
std::mutex mutex;
std::condition_variable cv;
- std::atomic<u64> ticks = 0;
bool quit = false;
};
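The tick API declared above replaces the VKFence/VKFenceView machinery. A minimal sketch, assuming the codebase's VKBuffer and VKScheduler types, of the pattern its consumers are expected to follow (the staging buffer pool and swapchain diffs below do exactly this): tag a resource with CurrentTick() when the current submission references it, reuse it only once IsFree() reports that tick has been signalled, and fall back to Wait() when blocking is unavoidable. PooledResource and TryReuse are illustrative names, not part of the patch.

    struct PooledResource {
        std::unique_ptr<VKBuffer> buffer;
        u64 tick = 0; // Last scheduler tick that referenced this resource
    };

    VKBuffer* TryReuse(std::vector<PooledResource>& pool, VKScheduler& scheduler) {
        for (PooledResource& entry : pool) {
            if (!scheduler.IsFree(entry.tick)) {
                continue; // The GPU has not reached this tick yet; the buffer may be in use
            }
            entry.tick = scheduler.CurrentTick(); // Retag for the submission being built
            return entry.buffer.get();
        }
        return nullptr; // Caller allocates a new buffer and tags it with CurrentTick()
    }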
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 5eca0ab91..2fd3b7f39 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -10,36 +10,18 @@
#include "common/bit_util.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_device.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
-VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence,
- u64 last_epoch)
- : buffer{std::move(buffer)}, watch{fence}, last_epoch{last_epoch} {}
+VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_)
+ : buffer{std::move(buffer_)} {}
-VKStagingBufferPool::StagingBuffer::StagingBuffer(StagingBuffer&& rhs) noexcept {
- buffer = std::move(rhs.buffer);
- watch = std::move(rhs.watch);
- last_epoch = rhs.last_epoch;
-}
-
-VKStagingBufferPool::StagingBuffer::~StagingBuffer() = default;
-
-VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator=(
- StagingBuffer&& rhs) noexcept {
- buffer = std::move(rhs.buffer);
- watch = std::move(rhs.watch);
- last_epoch = rhs.last_epoch;
- return *this;
-}
-
-VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
- VKScheduler& scheduler)
- : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
+VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device_, VKMemoryManager& memory_manager_,
+ VKScheduler& scheduler_)
+ : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}
VKStagingBufferPool::~VKStagingBufferPool() = default;
@@ -51,7 +33,6 @@ VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visib
}
void VKStagingBufferPool::TickFrame() {
- ++epoch;
current_delete_level = (current_delete_level + 1) % NumLevels;
ReleaseCache(true);
@@ -59,11 +40,12 @@ void VKStagingBufferPool::TickFrame() {
}
VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
- for (auto& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
- if (entry.watch.TryWatch(scheduler.GetFence())) {
- entry.last_epoch = epoch;
- return &*entry.buffer;
+ for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) {
+ if (!scheduler.IsFree(entry.tick)) {
+ continue;
}
+ entry.tick = scheduler.CurrentTick();
+ return &*entry.buffer;
}
return nullptr;
}
@@ -86,8 +68,10 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
});
buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
- auto& entries = GetCache(host_visible)[log2].entries;
- return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
+ std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries;
+ StagingBuffer& entry = entries.emplace_back(std::move(buffer));
+ entry.tick = scheduler.CurrentTick();
+ return *entry.buffer;
}
VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
@@ -109,9 +93,8 @@ u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t lo
auto& entries = staging.entries;
const std::size_t old_size = entries.size();
- const auto is_deleteable = [this](const auto& entry) {
- static constexpr u64 epochs_to_destroy = 180;
- return entry.last_epoch + epochs_to_destroy < epoch && !entry.watch.IsUsed();
+ const auto is_deleteable = [this](const StagingBuffer& entry) {
+ return scheduler.IsFree(entry.tick);
};
const std::size_t begin_offset = staging.delete_index;
const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 3c4901437..2dd5049ac 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -10,13 +10,11 @@
#include "common/common_types.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
class VKDevice;
-class VKFenceWatch;
class VKScheduler;
struct VKBuffer final {
@@ -36,16 +34,10 @@ public:
private:
struct StagingBuffer final {
- explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer, VKFence& fence, u64 last_epoch);
- StagingBuffer(StagingBuffer&& rhs) noexcept;
- StagingBuffer(const StagingBuffer&) = delete;
- ~StagingBuffer();
-
- StagingBuffer& operator=(StagingBuffer&& rhs) noexcept;
+ explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer);
std::unique_ptr<VKBuffer> buffer;
- VKFenceWatch watch;
- u64 last_epoch = 0;
+ u64 tick = 0;
};
struct StagingBuffers final {
@@ -73,8 +65,6 @@ private:
StagingBuffersCache host_staging_buffers;
StagingBuffersCache device_staging_buffers;
- u64 epoch = 0;
-
std::size_t current_delete_level = 0;
};
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 9151d9fb1..5d2c4a796 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -42,7 +42,6 @@ Flags MakeInvalidationFlags() {
flags[DepthWriteEnable] = true;
flags[DepthCompareOp] = true;
flags[FrontFace] = true;
- flags[PrimitiveTopology] = true;
flags[StencilOp] = true;
flags[StencilTestEnable] = true;
return flags;
@@ -112,10 +111,6 @@ void SetupDirtyFrontFace(Tables& tables) {
table[OFF(screen_y_control)] = FrontFace;
}
-void SetupDirtyPrimitiveTopology(Tables& tables) {
- tables[0][OFF(draw.topology)] = PrimitiveTopology;
-}
-
void SetupDirtyStencilOp(Tables& tables) {
auto& table = tables[0];
table[OFF(stencil_front_op_fail)] = StencilOp;
@@ -137,12 +132,9 @@ void SetupDirtyStencilTestEnable(Tables& tables) {
} // Anonymous namespace
-StateTracker::StateTracker(Core::System& system)
- : system{system}, invalidation_flags{MakeInvalidationFlags()} {}
-
-void StateTracker::Initialize() {
- auto& dirty = system.GPU().Maxwell3D().dirty;
- auto& tables = dirty.tables;
+StateTracker::StateTracker(Tegra::GPU& gpu)
+ : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
+ auto& tables = gpu.Maxwell3D().dirty.tables;
SetupDirtyRenderTargets(tables);
SetupDirtyViewports(tables);
SetupDirtyScissors(tables);
@@ -156,13 +148,8 @@ void StateTracker::Initialize() {
SetupDirtyDepthWriteEnable(tables);
SetupDirtyDepthCompareOp(tables);
SetupDirtyFrontFace(tables);
- SetupDirtyPrimitiveTopology(tables);
SetupDirtyStencilOp(tables);
SetupDirtyStencilTestEnable(tables);
}
-void StateTracker::InvalidateCommandBufferState() {
- system.GPU().Maxwell3D().dirty.flags |= invalidation_flags;
-}
-
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index 54ca0d6c6..1de789e57 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -32,7 +32,6 @@ enum : u8 {
DepthWriteEnable,
DepthCompareOp,
FrontFace,
- PrimitiveTopology,
StencilOp,
StencilTestEnable,
@@ -43,12 +42,15 @@ static_assert(Last <= std::numeric_limits<u8>::max());
} // namespace Dirty
class StateTracker {
-public:
- explicit StateTracker(Core::System& system);
+ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
- void Initialize();
+public:
+ explicit StateTracker(Tegra::GPU& gpu);
- void InvalidateCommandBufferState();
+ void InvalidateCommandBufferState() {
+ flags |= invalidation_flags;
+ current_topology = INVALID_TOPOLOGY;
+ }
bool TouchViewports() {
return Exchange(Dirty::Viewports, false);
@@ -102,10 +104,6 @@ public:
return Exchange(Dirty::FrontFace, false);
}
- bool TouchPrimitiveTopology() {
- return Exchange(Dirty::PrimitiveTopology, false);
- }
-
bool TouchStencilOp() {
return Exchange(Dirty::StencilOp, false);
}
@@ -114,16 +112,24 @@ public:
return Exchange(Dirty::StencilTestEnable, false);
}
+ bool ChangePrimitiveTopology(Maxwell::PrimitiveTopology new_topology) {
+ const bool has_changed = current_topology != new_topology;
+ current_topology = new_topology;
+ return has_changed;
+ }
+
private:
+ static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
+
bool Exchange(std::size_t id, bool new_value) const noexcept {
- auto& flags = system.GPU().Maxwell3D().dirty.flags;
const bool is_dirty = flags[id];
flags[id] = new_value;
return is_dirty;
}
- Core::System& system;
+ Tegra::Engines::Maxwell3D::DirtyState::Flags& flags;
Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
+ Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
};
} // namespace Vulkan
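ChangePrimitiveTopology turns the removed PrimitiveTopology dirty flag into value tracking: the tracker remembers the last topology it saw and reports whether it changed, and InvalidateCommandBufferState() resets it to INVALID_TOPOLOGY so the first draw of a new command buffer always re-records it. A hedged sketch of the expected rasterizer-side call; the real call site is outside this excerpt, and ConvertTopology plus SetPrimitiveTopologyEXT are assumptions standing in for the Maxwell-to-Vulkan conversion and the wrapper's extended-dynamic-state command.

    void UpdateDynamicTopology(StateTracker& state_tracker, VKScheduler& scheduler,
                               Maxwell::PrimitiveTopology topology) {
        if (!state_tracker.ChangePrimitiveTopology(topology)) {
            return; // Same topology as the previous draw; nothing to record
        }
        scheduler.Record([topology](vk::CommandBuffer cmdbuf) {
            cmdbuf.SetPrimitiveTopologyEXT(ConvertTopology(topology));
        });
    }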
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index a5526a3f5..1b59612b9 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -11,7 +11,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "video_core/renderer_vulkan/vk_device.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/renderer_vulkan/wrapper.h"
@@ -57,9 +56,9 @@ u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
} // Anonymous namespace
-VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
+VKStreamBuffer::VKStreamBuffer(const VKDevice& device_, VKScheduler& scheduler_,
VkBufferUsageFlags usage)
- : device{device}, scheduler{scheduler} {
+ : device{device_}, scheduler{scheduler_} {
CreateBuffers(usage);
ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE);
@@ -111,7 +110,7 @@ void VKStreamBuffer::Unmap(u64 size) {
}
auto& watch = current_watches[current_watch_cursor++];
watch.upper_bound = offset;
- watch.fence.Watch(scheduler.GetFence());
+ watch.tick = scheduler.CurrentTick();
}
void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
@@ -121,7 +120,8 @@ void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
    // Subtract from the preferred heap size some bytes to avoid getting out of memory.
const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
- const VkDeviceSize allocable_size = heap_size - 9 * 1024 * 1024;
+    // Following DXVK's example, use half of the preferred heap size.
+ const VkDeviceSize allocable_size = heap_size / 2;
buffer = device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
@@ -157,7 +157,7 @@ void VKStreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
while (requested_upper_bound < wait_bound && wait_cursor < *invalidation_mark) {
auto& watch = previous_watches[wait_cursor];
wait_bound = watch.upper_bound;
- watch.fence.Wait();
+ scheduler.Wait(watch.tick);
++wait_cursor;
}
}
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 689f0d276..5e15ad78f 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -14,7 +14,6 @@
namespace Vulkan {
class VKDevice;
-class VKFence;
class VKFenceWatch;
class VKScheduler;
@@ -44,8 +43,8 @@ public:
}
private:
- struct Watch final {
- VKFenceWatch fence;
+ struct Watch {
+ u64 tick{};
u64 upper_bound{};
};
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index 6bfd2abae..9636a7c65 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -12,7 +12,7 @@
#include "core/core.h"
#include "core/frontend/framebuffer_layout.h"
#include "video_core/renderer_vulkan/vk_device.h"
-#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
#include "video_core/renderer_vulkan/wrapper.h"
@@ -56,8 +56,8 @@ VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 wi
} // Anonymous namespace
-VKSwapchain::VKSwapchain(VkSurfaceKHR surface, const VKDevice& device)
- : surface{surface}, device{device} {}
+VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const VKDevice& device_, VKScheduler& scheduler_)
+ : surface{surface_}, device{device_}, scheduler{scheduler_} {}
VKSwapchain::~VKSwapchain() = default;
@@ -75,21 +75,18 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
CreateSemaphores();
CreateImageViews();
- fences.resize(image_count, nullptr);
+ resource_ticks.clear();
+ resource_ticks.resize(image_count);
}
void VKSwapchain::AcquireNextImage() {
device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
*present_semaphores[frame_index], {}, &image_index);
- if (auto& fence = fences[image_index]; fence) {
- fence->Wait();
- fence->Release();
- fence = nullptr;
- }
+ scheduler.Wait(resource_ticks[image_index]);
}
-bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
+bool VKSwapchain::Present(VkSemaphore render_semaphore) {
const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
const auto present_queue{device.GetPresentQueue()};
@@ -123,8 +120,7 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
break;
}
- ASSERT(fences[image_index] == nullptr);
- fences[image_index] = &fence;
+ resource_ticks[image_index] = scheduler.CurrentTick();
frame_index = (frame_index + 1) % static_cast<u32>(image_count);
return recreated;
}
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
index a35d61345..6b39befdf 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.h
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -16,11 +16,11 @@ struct FramebufferLayout;
namespace Vulkan {
class VKDevice;
-class VKFence;
+class VKScheduler;
class VKSwapchain {
public:
- explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device);
+ explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device, VKScheduler& scheduler);
~VKSwapchain();
/// Creates (or recreates) the swapchain with a given size.
@@ -31,7 +31,7 @@ public:
    /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
    /// recreated.
- bool Present(VkSemaphore render_semaphore, VKFence& fence);
+ bool Present(VkSemaphore render_semaphore);
/// Returns true when the framebuffer layout has changed.
bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
@@ -74,6 +74,7 @@ private:
const VkSurfaceKHR surface;
const VKDevice& device;
+ VKScheduler& scheduler;
vk::SwapchainKHR swapchain;
@@ -81,7 +82,7 @@ private:
std::vector<VkImage> images;
std::vector<vk::ImageView> image_views;
std::vector<vk::Framebuffer> framebuffers;
- std::vector<VKFence*> fences;
+ std::vector<u64> resource_ticks;
std::vector<vk::Semaphore> present_semaphores;
u32 image_index{};
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 2c6f54101..f2c8f2ae1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -188,12 +188,10 @@ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::Swizzl
} // Anonymous namespace
-CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
- VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
+CachedSurface::CachedSurface(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
GPUVAddr gpu_addr, const SurfaceParams& params)
- : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system},
- device{device}, resource_manager{resource_manager},
+ : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, device{device},
memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
if (params.IsBuffer()) {
buffer = CreateBuffer(device, params, host_memory_size);
@@ -490,19 +488,20 @@ VkImageView CachedSurfaceView::GetAttachment() {
return *render_target;
}
-VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- const VKDevice& device, VKResourceManager& resource_manager,
- VKMemoryManager& memory_manager, VKScheduler& scheduler,
- VKStagingBufferPool& staging_pool)
- : TextureCache(system, rasterizer, device.IsOptimalAstcSupported()), device{device},
- resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
- staging_pool{staging_pool} {}
+VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::Engines::Maxwell3D& maxwell3d,
+ Tegra::MemoryManager& gpu_memory, const VKDevice& device_,
+ VKMemoryManager& memory_manager_, VKScheduler& scheduler_,
+ VKStagingBufferPool& staging_pool_)
+ : TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()),
+ device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{
+ staging_pool_} {}
VKTextureCache::~VKTextureCache() = default;
Surface VKTextureCache::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
- return std::make_shared<CachedSurface>(system, device, resource_manager, memory_manager,
- scheduler, staging_pool, gpu_addr, params);
+ return std::make_shared<CachedSurface>(device, memory_manager, scheduler, staging_pool,
+ gpu_addr, params);
}
void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 807e26c8a..39202feba 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -15,10 +15,6 @@
#include "video_core/texture_cache/surface_base.h"
#include "video_core/texture_cache/texture_cache.h"
-namespace Core {
-class System;
-}
-
namespace VideoCore {
class RasterizerInterface;
}
@@ -27,7 +23,6 @@ namespace Vulkan {
class RasterizerVulkan;
class VKDevice;
-class VKResourceManager;
class VKScheduler;
class VKStagingBufferPool;
@@ -45,8 +40,7 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
friend CachedSurfaceView;
public:
- explicit CachedSurface(Core::System& system, const VKDevice& device,
- VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
+ explicit CachedSurface(const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
GPUVAddr gpu_addr, const SurfaceParams& params);
~CachedSurface();
@@ -101,9 +95,7 @@ private:
VkImageSubresourceRange GetImageSubresourceRange() const;
- Core::System& system;
const VKDevice& device;
- VKResourceManager& resource_manager;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
VKStagingBufferPool& staging_pool;
@@ -201,10 +193,10 @@ private:
class VKTextureCache final : public TextureCacheBase {
public:
- explicit VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- const VKDevice& device, VKResourceManager& resource_manager,
- VKMemoryManager& memory_manager, VKScheduler& scheduler,
- VKStagingBufferPool& staging_pool);
+ explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer,
+ Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
+ const VKDevice& device, VKMemoryManager& memory_manager,
+ VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
~VKTextureCache();
private:
@@ -219,7 +211,6 @@ private:
void BufferCopy(Surface& src_surface, Surface& dst_surface) override;
const VKDevice& device;
- VKResourceManager& resource_manager;
VKMemoryManager& memory_manager;
VKScheduler& scheduler;
VKStagingBufferPool& staging_pool;
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 56055af1b..c034558a3 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -6,6 +6,7 @@
#include <exception>
#include <memory>
#include <optional>
+#include <string_view>
#include <utility>
#include <vector>
@@ -18,21 +19,42 @@ namespace Vulkan::vk {
namespace {
+template <typename Func>
+void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld,
+ Func&& func) {
+    // Calling GetProperties here queries Vulkan more often than strictly necessary, but these
+    // calls are supposed to be cheap.
+ std::stable_sort(devices.begin(), devices.end(),
+ [&dld, &func](VkPhysicalDevice lhs, VkPhysicalDevice rhs) {
+ return func(vk::PhysicalDevice(lhs, dld).GetProperties(),
+ vk::PhysicalDevice(rhs, dld).GetProperties());
+ });
+}
+
+void SortPhysicalDevicesPerVendor(std::vector<VkPhysicalDevice>& devices,
+ const InstanceDispatch& dld,
+ std::initializer_list<u32> vendor_ids) {
+ for (auto it = vendor_ids.end(); it != vendor_ids.begin();) {
+ --it;
+ SortPhysicalDevices(devices, dld, [id = *it](const auto& lhs, const auto& rhs) {
+ return lhs.vendorID == id && rhs.vendorID != id;
+ });
+ }
+}
+
void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
- std::stable_sort(devices.begin(), devices.end(), [&](auto lhs, auto rhs) {
- // This will call Vulkan more than needed, but these calls are cheap.
- const auto lhs_properties = vk::PhysicalDevice(lhs, dld).GetProperties();
- const auto rhs_properties = vk::PhysicalDevice(rhs, dld).GetProperties();
-
- // Prefer discrete GPUs, Nvidia over AMD, AMD over Intel, Intel over the rest.
- const bool preferred =
- (lhs_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
- rhs_properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) ||
- (lhs_properties.vendorID == 0x10DE && rhs_properties.vendorID != 0x10DE) ||
- (lhs_properties.vendorID == 0x1002 && rhs_properties.vendorID != 0x1002) ||
- (lhs_properties.vendorID == 0x8086 && rhs_properties.vendorID != 0x8086);
- return !preferred;
+    // Sort by name as a baseline so that GPUs with higher model numbers appear first
+    // (e.g. a GTX 1650 is intentionally listed before a GTX 1080).
+ SortPhysicalDevices(devices, dld, [](const auto& lhs, const auto& rhs) {
+ return std::string_view{lhs.deviceName} > std::string_view{rhs.deviceName};
});
+ // Prefer discrete over non-discrete
+ SortPhysicalDevices(devices, dld, [](const auto& lhs, const auto& rhs) {
+ return lhs.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
+ rhs.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
+ });
+ // Prefer Nvidia over AMD, AMD over Intel, Intel over the rest.
+ SortPhysicalDevicesPerVendor(devices, dld, {0x10DE, 0x1002, 0x8086});
}
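Each std::stable_sort pass above partitions by a single key and preserves the previous order within ties, so the pass applied last is the most significant. A short worked note on the resulting priority, with illustrative device names:

    // Passes, in order: name (descending), discrete-first, Intel-first, AMD-first, Nvidia-first.
    // Net effect: Nvidia devices list first, then AMD, then Intel, then everything else;
    // within a vendor, discrete devices come before integrated ones, and the descending
    // name sort is the final tie-breaker (e.g. a GTX 1650 lists before a GTX 1080).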
template <typename T>
@@ -149,6 +171,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkGetFenceStatus);
X(vkGetImageMemoryRequirements);
X(vkGetQueryPoolResults);
+ X(vkGetSemaphoreCounterValueKHR);
X(vkMapMemory);
X(vkQueueSubmit);
X(vkResetFences);
@@ -157,6 +180,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkUpdateDescriptorSetWithTemplateKHR);
X(vkUpdateDescriptorSets);
X(vkWaitForFences);
+ X(vkWaitSemaphoresKHR);
#undef X
}
@@ -263,6 +287,22 @@ const char* ToString(VkResult result) noexcept {
return "VK_ERROR_INVALID_DEVICE_ADDRESS_EXT";
case VkResult::VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
return "VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT";
+ case VkResult::VK_ERROR_UNKNOWN:
+ return "VK_ERROR_UNKNOWN";
+ case VkResult::VK_ERROR_INCOMPATIBLE_VERSION_KHR:
+ return "VK_ERROR_INCOMPATIBLE_VERSION_KHR";
+ case VkResult::VK_THREAD_IDLE_KHR:
+ return "VK_THREAD_IDLE_KHR";
+ case VkResult::VK_THREAD_DONE_KHR:
+ return "VK_THREAD_DONE_KHR";
+ case VkResult::VK_OPERATION_DEFERRED_KHR:
+ return "VK_OPERATION_DEFERRED_KHR";
+ case VkResult::VK_OPERATION_NOT_DEFERRED_KHR:
+ return "VK_OPERATION_NOT_DEFERRED_KHR";
+ case VkResult::VK_PIPELINE_COMPILE_REQUIRED_EXT:
+ return "VK_PIPELINE_COMPILE_REQUIRED_EXT";
+ case VkResult::VK_RESULT_MAX_ENUM:
+ return "VK_RESULT_MAX_ENUM";
}
return "Unknown";
}
@@ -558,7 +598,10 @@ Semaphore Device::CreateSemaphore() const {
.pNext = nullptr,
.flags = 0,
};
+ return CreateSemaphore(ci);
+}
+Semaphore Device::CreateSemaphore(const VkSemaphoreCreateInfo& ci) const {
VkSemaphore object;
Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
return Semaphore(object, handle, *dld);
@@ -644,7 +687,7 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
return ShaderModule(object, handle, *dld);
}
-Event Device::CreateNewEvent() const {
+Event Device::CreateEvent() const {
static constexpr VkEventCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
.pNext = nullptr,
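The vkGetSemaphoreCounterValueKHR / vkWaitSemaphoresKHR loads and the new CreateSemaphore(ci) overload exist to support VK_KHR_timeline_semaphore, which the tick system is built on. A minimal sketch, assuming the structure chaining that extension requires, of how a timeline semaphore can be created through the new overload; the MasterSemaphore class that actually owns it is added elsewhere in this change.

    // Illustrative only: chain VkSemaphoreTypeCreateInfoKHR so the semaphore is a
    // timeline semaphore whose counter starts at 0.
    vk::Semaphore CreateTimelineSemaphore(const vk::Device& device) {
        static constexpr VkSemaphoreTypeCreateInfoKHR type_ci{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
            .pNext = nullptr,
            .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
            .initialValue = 0,
        };
        return device.CreateSemaphore({
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
            .pNext = &type_ci,
            .flags = 0,
        });
    }

GetCounter() and Wait() on the returned handle then map directly onto the two newly loaded KHR entry points, which is what lets ticks be queried and waited on without any VkFence.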
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index 748a94d2f..f64919623 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -267,6 +267,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkGetFenceStatus vkGetFenceStatus;
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
+ PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
PFN_vkMapMemory vkMapMemory;
PFN_vkQueueSubmit vkQueueSubmit;
PFN_vkResetFences vkResetFences;
@@ -275,6 +276,7 @@ struct DeviceDispatch : public InstanceDispatch {
PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;
PFN_vkWaitForFences vkWaitForFences;
+ PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
};
/// Loads instance agnostic function pointers.
@@ -550,7 +552,6 @@ using PipelineLayout = Handle<VkPipelineLayout, VkDevice, DeviceDispatch>;
using QueryPool = Handle<VkQueryPool, VkDevice, DeviceDispatch>;
using RenderPass = Handle<VkRenderPass, VkDevice, DeviceDispatch>;
using Sampler = Handle<VkSampler, VkDevice, DeviceDispatch>;
-using Semaphore = Handle<VkSemaphore, VkDevice, DeviceDispatch>;
using ShaderModule = Handle<VkShaderModule, VkDevice, DeviceDispatch>;
using SurfaceKHR = Handle<VkSurfaceKHR, VkInstance, InstanceDispatch>;
@@ -582,7 +583,8 @@ public:
/// Construct a queue handle.
constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {}
- VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept {
+ VkResult Submit(Span<VkSubmitInfo> submit_infos,
+ VkFence fence = VK_NULL_HANDLE) const noexcept {
return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence);
}
@@ -674,6 +676,44 @@ public:
}
};
+class Semaphore : public Handle<VkSemaphore, VkDevice, DeviceDispatch> {
+ using Handle<VkSemaphore, VkDevice, DeviceDispatch>::Handle;
+
+public:
+ [[nodiscard]] u64 GetCounter() const {
+ u64 value;
+ Check(dld->vkGetSemaphoreCounterValueKHR(owner, handle, &value));
+ return value;
+ }
+
+ /**
+ * Waits for a timeline semaphore on the host.
+ *
+     * @param value Value to wait for
+     * @param timeout Timeout in nanoseconds
+ * @return True on successful wait, false on timeout
+ */
+ bool Wait(u64 value, u64 timeout = std::numeric_limits<u64>::max()) const {
+ const VkSemaphoreWaitInfoKHR wait_info{
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
+ .pNext = nullptr,
+ .flags = 0,
+ .semaphoreCount = 1,
+ .pSemaphores = &handle,
+ .pValues = &value,
+ };
+ const VkResult result = dld->vkWaitSemaphoresKHR(owner, &wait_info, timeout);
+ switch (result) {
+ case VK_SUCCESS:
+ return true;
+ case VK_TIMEOUT:
+ return false;
+ default:
+ throw Exception(result);
+ }
+ }
+};
+
class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> {
using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle;
@@ -694,6 +734,8 @@ public:
Semaphore CreateSemaphore() const;
+ Semaphore CreateSemaphore(const VkSemaphoreCreateInfo& ci) const;
+
Fence CreateFence(const VkFenceCreateInfo& ci) const;
DescriptorPool CreateDescriptorPool(const VkDescriptorPoolCreateInfo& ci) const;
@@ -721,7 +763,7 @@ public:
ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const;
- Event CreateNewEvent() const;
+ Event CreateEvent() const;
SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const;
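On the submission side, a tick is presumably signalled by chaining VkTimelineSemaphoreSubmitInfoKHR into the VkSubmitInfo handed to the now fence-less Queue::Submit; a hedged sketch under that assumption (SignalTick and its parameters are illustrative, not part of the patch):

    void SignalTick(const VKDevice& device, const vk::Semaphore& timeline,
                    const vk::CommandBuffer& cmdbuf, u64 tick) {
        const VkSemaphore semaphore = *timeline;
        const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
            .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
            .pNext = nullptr,
            .waitSemaphoreValueCount = 0,
            .pWaitSemaphoreValues = nullptr,
            .signalSemaphoreValueCount = 1,
            .pSignalSemaphoreValues = &tick, // Timeline reaches `tick` when the work completes
        };
        const VkSubmitInfo submit_info{
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .pNext = &timeline_si,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = nullptr,
            .pWaitDstStageMask = nullptr,
            .commandBufferCount = 1,
            .pCommandBuffers = cmdbuf.address(),
            .signalSemaphoreCount = 1,
            .pSignalSemaphores = &semaphore,
        };
        // No VkFence: the default argument added to Queue::Submit passes VK_NULL_HANDLE,
        // and completion is observed through the timeline counter instead.
        device.GetGraphicsQueue().Submit(submit_info);
    }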