author:    bunnei <bunneidev@gmail.com>    2021-07-25 20:39:04 +0200
committer: GitHub <noreply@github.com>    2021-07-25 20:39:04 +0200
commit:    98b26b6e126d4775fdf3f773fe8a8ac808a8ff8f (patch)
tree:      816faa96c2c4d291825063433331a8ea4b3d08f1 /src/video_core/renderer_vulkan
parent:    Merge pull request #6699 from lat9nq/common-threads (diff)
parent:    shader: Support out of bound local memory reads and immediate writes (diff)
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/blit_image.cpp | 40
-rw-r--r--  src/video_core/renderer_vulkan/blit_image.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 92
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 79
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 54
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.h | 7
-rw-r--r--  src/video_core/renderer_vulkan/pipeline_helper.h | 154
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 60
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp | 94
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 68
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 22
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 270
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h | 34
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 296
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h | 72
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 172
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h | 70
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 839
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 145
-rw-r--r--  src/video_core/renderer_vulkan/vk_master_semaphore.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 867
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 176
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 8
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 475
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 59
-rw-r--r--  src/video_core/renderer_vulkan/vk_render_pass_cache.cpp | 96
-rw-r--r--  src/video_core/renderer_vulkan/vk_render_pass_cache.h | 55
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_pool.cpp | 12
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_pool.h | 12
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 172
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 38
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 3166
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.h | 99
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp | 56
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 59
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.h | 31
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 243
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 80
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h | 4
42 files changed, 2988 insertions(+), 5326 deletions(-)
diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp
index b7f5b8bc2..6c1b2f063 100644
--- a/src/video_core/renderer_vulkan/blit_image.cpp
+++ b/src/video_core/renderer_vulkan/blit_image.cpp
@@ -49,6 +49,16 @@ constexpr VkDescriptorSetLayoutCreateInfo ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREA
.bindingCount = 1,
.pBindings = &TEXTURE_DESCRIPTOR_SET_LAYOUT_BINDING<0>,
};
+template <u32 num_textures>
+inline constexpr DescriptorBankInfo TEXTURE_DESCRIPTOR_BANK_INFO{
+ .uniform_buffers = 0,
+ .storage_buffers = 0,
+ .texture_buffers = 0,
+ .image_buffers = 0,
+ .textures = num_textures,
+ .images = 0,
+ .score = 2,
+};
constexpr VkDescriptorSetLayoutCreateInfo TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@@ -323,18 +333,19 @@ void BindBlitState(vk::CommandBuffer cmdbuf, VkPipelineLayout layout, const Regi
cmdbuf.SetScissor(0, scissor);
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_VERTEX_BIT, push_constants);
}
-
} // Anonymous namespace
BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
- StateTracker& state_tracker_, VKDescriptorPool& descriptor_pool)
+ StateTracker& state_tracker_, DescriptorPool& descriptor_pool)
: device{device_}, scheduler{scheduler_}, state_tracker{state_tracker_},
one_texture_set_layout(device.GetLogical().CreateDescriptorSetLayout(
ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
two_textures_set_layout(device.GetLogical().CreateDescriptorSetLayout(
TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
- one_texture_descriptor_allocator(descriptor_pool, *one_texture_set_layout),
- two_textures_descriptor_allocator(descriptor_pool, *two_textures_set_layout),
+ one_texture_descriptor_allocator{
+ descriptor_pool.Allocator(*one_texture_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<1>)},
+ two_textures_descriptor_allocator{
+ descriptor_pool.Allocator(*two_textures_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<2>)},
one_texture_pipeline_layout(device.GetLogical().CreatePipelineLayout(
PipelineLayoutCreateInfo(one_texture_set_layout.address()))),
two_textures_pipeline_layout(device.GetLogical().CreatePipelineLayout(
@@ -362,14 +373,14 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, const ImageV
.operation = operation,
};
const VkPipelineLayout layout = *one_texture_pipeline_layout;
- const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
+ const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = is_linear ? *linear_sampler : *nearest_sampler;
const VkPipeline pipeline = FindOrEmplacePipeline(key);
- const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
scheduler.RequestRenderpass(dst_framebuffer);
- scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_view, descriptor_set,
- &device = device](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([this, dst_region, src_region, pipeline, layout, sampler,
+ src_view](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
+ const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
@@ -391,12 +402,11 @@ void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
const VkPipelineLayout layout = *two_textures_pipeline_layout;
const VkSampler sampler = *nearest_sampler;
const VkPipeline pipeline = BlitDepthStencilPipeline(dst_framebuffer->RenderPass());
- const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_depth_view,
- src_stencil_view, descriptor_set,
- &device = device](vk::CommandBuffer cmdbuf) {
+ src_stencil_view, this](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
+ const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
src_stencil_view);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@@ -416,7 +426,6 @@ void BlitImageHelper::ConvertD32ToR32(const Framebuffer* dst_framebuffer,
void BlitImageHelper::ConvertR32ToD32(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
-
ConvertColorToDepthPipeline(convert_r32_to_d32_pipeline, dst_framebuffer->RenderPass());
Convert(*convert_r32_to_d32_pipeline, dst_framebuffer, src_image_view);
}
@@ -436,16 +445,14 @@ void BlitImageHelper::ConvertR16ToD16(const Framebuffer* dst_framebuffer,
void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
const VkPipelineLayout layout = *one_texture_pipeline_layout;
- const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
+ const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = *nearest_sampler;
- const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
const VkExtent2D extent{
.width = src_image_view.size.width,
.height = src_image_view.size.height,
};
scheduler.RequestRenderpass(dst_framebuffer);
- scheduler.Record([pipeline, layout, sampler, src_view, descriptor_set, extent,
- &device = device](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([pipeline, layout, sampler, src_view, extent, this](vk::CommandBuffer cmdbuf) {
const VkOffset2D offset{
.x = 0,
.y = 0,
@@ -466,6 +473,7 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
.tex_scale = {viewport.width, viewport.height},
.tex_offset = {0.0f, 0.0f},
};
+ const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
// TODO: Barriers
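
The new TEXTURE_DESCRIPTOR_BANK_INFO<N> above is a C++ variable template: one constexpr DescriptorBankInfo constant per texture count, replacing runtime construction. A minimal standalone sketch of the same pattern, using a stand-in BankInfo type rather than the project's DescriptorBankInfo:

#include <cstdint>

struct BankInfo {
    std::uint32_t textures;
    std::uint32_t score;
};

// One constant per instantiation, resolved entirely at compile time.
template <std::uint32_t num_textures>
inline constexpr BankInfo TEXTURE_BANK_INFO{
    .textures = num_textures,
    .score = 2,
};

static_assert(TEXTURE_BANK_INFO<1>.textures == 1);
static_assert(TEXTURE_BANK_INFO<2>.textures == 2);

The commit also moves every descriptor_allocator Commit() call inside the scheduler lambda, so the descriptor set is allocated when the command buffer is actually recorded rather than when the blit is requested.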
diff --git a/src/video_core/renderer_vulkan/blit_image.h b/src/video_core/renderer_vulkan/blit_image.h
index 0d81a06ed..33ee095c1 100644
--- a/src/video_core/renderer_vulkan/blit_image.h
+++ b/src/video_core/renderer_vulkan/blit_image.h
@@ -31,7 +31,7 @@ struct BlitImagePipelineKey {
class BlitImageHelper {
public:
explicit BlitImageHelper(const Device& device, VKScheduler& scheduler,
- StateTracker& state_tracker, VKDescriptorPool& descriptor_pool);
+ StateTracker& state_tracker, DescriptorPool& descriptor_pool);
~BlitImageHelper();
void BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 362278f01..d70153df3 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -15,9 +15,7 @@
#include "video_core/renderer_vulkan/vk_state_tracker.h"
namespace Vulkan {
-
namespace {
-
constexpr size_t POINT = 0;
constexpr size_t LINE = 1;
constexpr size_t POLYGON = 2;
@@ -39,10 +37,20 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
POLYGON, // Patches
};
+void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) {
+ std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) {
+ return VideoCommon::TransformFeedbackState::Layout{
+ .stream = layout.stream,
+ .varying_count = layout.varying_count,
+ .stride = layout.stride,
+ };
+ });
+ state.varyings = regs.tfb_varying_locs;
+}
} // Anonymous namespace
void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
- bool has_extended_dynamic_state) {
+ bool has_extended_dynamic_state, bool has_dynamic_vertex_input) {
const Maxwell& regs = maxwell3d.regs;
const std::array enabled_lut{
regs.polygon_offset_point_enable,
@@ -52,6 +60,9 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
raw1 = 0;
+ extended_dynamic_state.Assign(has_extended_dynamic_state ? 1 : 0);
+ dynamic_vertex_input.Assign(has_dynamic_vertex_input ? 1 : 0);
+ xfb_enabled.Assign(regs.tfb_enabled != 0);
primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
@@ -63,37 +74,66 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
logic_op.Assign(PackLogicOp(regs.logic_op.operation));
- rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
topology.Assign(regs.draw.topology);
msaa_mode.Assign(regs.multisample_mode);
raw2 = 0;
+ rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
const auto test_func =
regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
alpha_test_func.Assign(PackComparisonOp(test_func));
early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
-
+ depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0);
+ depth_format.Assign(static_cast<u32>(regs.zeta.format));
+ y_negate.Assign(regs.screen_y_control.y_negate != 0 ? 1 : 0);
+ provoking_vertex_last.Assign(regs.provoking_vertex_last != 0 ? 1 : 0);
+ conservative_raster_enable.Assign(regs.conservative_raster_enable != 0 ? 1 : 0);
+ smooth_lines.Assign(regs.line_smooth_enable != 0 ? 1 : 0);
+
+ for (size_t i = 0; i < regs.rt.size(); ++i) {
+ color_formats[i] = static_cast<u8>(regs.rt[i].format);
+ }
alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
point_size = Common::BitCast<u32>(regs.point_size);
- if (maxwell3d.dirty.flags[Dirty::InstanceDivisors]) {
- maxwell3d.dirty.flags[Dirty::InstanceDivisors] = false;
- for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
- const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
- binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
- }
- }
- if (maxwell3d.dirty.flags[Dirty::VertexAttributes]) {
- maxwell3d.dirty.flags[Dirty::VertexAttributes] = false;
- for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
- const auto& input = regs.vertex_attrib_format[index];
- auto& attribute = attributes[index];
- attribute.raw = 0;
- attribute.enabled.Assign(input.IsConstant() ? 0 : 1);
- attribute.buffer.Assign(input.buffer);
- attribute.offset.Assign(input.offset);
- attribute.type.Assign(static_cast<u32>(input.type.Value()));
- attribute.size.Assign(static_cast<u32>(input.size.Value()));
+ if (maxwell3d.dirty.flags[Dirty::VertexInput]) {
+ if (has_dynamic_vertex_input) {
+ // Dirty flag will be reset by the command buffer update
+ static constexpr std::array LUT{
+ 0u, // Invalid
+ 1u, // SignedNorm
+ 1u, // UnsignedNorm
+ 2u, // SignedInt
+ 3u, // UnsignedInt
+ 1u, // UnsignedScaled
+ 1u, // SignedScaled
+ 1u, // Float
+ };
+ const auto& attrs = regs.vertex_attrib_format;
+ attribute_types = 0;
+ for (size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
+ const u32 mask = attrs[i].constant != 0 ? 0 : 3;
+ const u32 type = LUT[static_cast<size_t>(attrs[i].type.Value())];
+ attribute_types |= static_cast<u64>(type & mask) << (i * 2);
+ }
+ } else {
+ maxwell3d.dirty.flags[Dirty::VertexInput] = false;
+ enabled_divisors = 0;
+ for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
+ const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
+ binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
+ enabled_divisors |= (is_enabled ? u64{1} : 0) << index;
+ }
+ for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
+ const auto& input = regs.vertex_attrib_format[index];
+ auto& attribute = attributes[index];
+ attribute.raw = 0;
+ attribute.enabled.Assign(input.constant ? 0 : 1);
+ attribute.buffer.Assign(input.buffer);
+ attribute.offset.Assign(input.offset);
+ attribute.type.Assign(static_cast<u32>(input.type.Value()));
+ attribute.size.Assign(static_cast<u32>(input.size.Value()));
+ }
}
}
if (maxwell3d.dirty.flags[Dirty::Blending]) {
@@ -109,10 +149,12 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
return static_cast<u16>(viewport.swizzle.raw);
});
}
- if (!has_extended_dynamic_state) {
- no_extended_dynamic_state.Assign(1);
+ if (!extended_dynamic_state) {
dynamic_state.Refresh(regs);
}
+ if (xfb_enabled) {
+ RefreshXfbState(xfb_state, regs);
+ }
}
void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t index) {
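
With VK_EXT_vertex_input_dynamic_state, the hunk above packs one 2-bit type per vertex attribute into the u64 attribute_types instead of filling the attributes array. A minimal sketch of that packing scheme (illustrative helpers, not code from this commit), assuming at most 32 attributes so that 32 * 2 bits fit in 64:

#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PackAttribute(std::uint64_t word, std::size_t index,
                                      std::uint32_t type) {
    // Each attribute owns bits [index * 2, index * 2 + 1].
    return word | (static_cast<std::uint64_t>(type & 0b11) << (index * 2));
}

constexpr std::uint32_t UnpackAttribute(std::uint64_t word, std::size_t index) {
    return static_cast<std::uint32_t>((word >> (index * 2)) & 0b11);
}

static_assert(UnpackAttribute(PackAttribute(0, 7, 3), 7) == 3);

DynamicAttributeType() in the header performs the same unpacking.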
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index a0eb83a68..c9be37935 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -12,6 +12,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
+#include "video_core/transform_feedback.h"
namespace Vulkan {
@@ -60,7 +61,7 @@ struct FixedPipelineState {
void Refresh(const Maxwell& regs, size_t index);
- constexpr std::array<bool, 4> Mask() const noexcept {
+ std::array<bool, 4> Mask() const noexcept {
return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
}
@@ -97,11 +98,11 @@ struct FixedPipelineState {
BitField<20, 3, u32> type;
BitField<23, 6, u32> size;
- constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
+ Maxwell::VertexAttribute::Type Type() const noexcept {
return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
}
- constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
+ Maxwell::VertexAttribute::Size Size() const noexcept {
return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
}
};
@@ -167,37 +168,53 @@ struct FixedPipelineState {
union {
u32 raw1;
- BitField<0, 1, u32> no_extended_dynamic_state;
- BitField<2, 1, u32> primitive_restart_enable;
- BitField<3, 1, u32> depth_bias_enable;
- BitField<4, 1, u32> depth_clamp_disabled;
- BitField<5, 1, u32> ndc_minus_one_to_one;
- BitField<6, 2, u32> polygon_mode;
- BitField<8, 5, u32> patch_control_points_minus_one;
- BitField<13, 2, u32> tessellation_primitive;
- BitField<15, 2, u32> tessellation_spacing;
- BitField<17, 1, u32> tessellation_clockwise;
- BitField<18, 1, u32> logic_op_enable;
- BitField<19, 4, u32> logic_op;
- BitField<23, 1, u32> rasterize_enable;
+ BitField<0, 1, u32> extended_dynamic_state;
+ BitField<1, 1, u32> dynamic_vertex_input;
+ BitField<2, 1, u32> xfb_enabled;
+ BitField<3, 1, u32> primitive_restart_enable;
+ BitField<4, 1, u32> depth_bias_enable;
+ BitField<5, 1, u32> depth_clamp_disabled;
+ BitField<6, 1, u32> ndc_minus_one_to_one;
+ BitField<7, 2, u32> polygon_mode;
+ BitField<9, 5, u32> patch_control_points_minus_one;
+ BitField<14, 2, u32> tessellation_primitive;
+ BitField<16, 2, u32> tessellation_spacing;
+ BitField<18, 1, u32> tessellation_clockwise;
+ BitField<19, 1, u32> logic_op_enable;
+ BitField<20, 4, u32> logic_op;
BitField<24, 4, Maxwell::PrimitiveTopology> topology;
BitField<28, 4, Tegra::Texture::MsaaMode> msaa_mode;
};
union {
u32 raw2;
- BitField<0, 3, u32> alpha_test_func;
- BitField<3, 1, u32> early_z;
+ BitField<0, 1, u32> rasterize_enable;
+ BitField<1, 3, u32> alpha_test_func;
+ BitField<4, 1, u32> early_z;
+ BitField<5, 1, u32> depth_enabled;
+ BitField<6, 5, u32> depth_format;
+ BitField<11, 1, u32> y_negate;
+ BitField<12, 1, u32> provoking_vertex_last;
+ BitField<13, 1, u32> conservative_raster_enable;
+ BitField<14, 1, u32> smooth_lines;
};
+ std::array<u8, Maxwell::NumRenderTargets> color_formats;
u32 alpha_test_ref;
u32 point_size;
- std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
- std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
std::array<u16, Maxwell::NumViewports> viewport_swizzles;
+ union {
+ u64 attribute_types; // Used with VK_EXT_vertex_input_dynamic_state
+ u64 enabled_divisors;
+ };
+ std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
+ std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
+
DynamicState dynamic_state;
+ VideoCommon::TransformFeedbackState xfb_state;
- void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state);
+ void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state,
+ bool has_dynamic_vertex_input);
size_t Hash() const noexcept;
@@ -208,8 +225,24 @@ struct FixedPipelineState {
}
size_t Size() const noexcept {
- const size_t total_size = sizeof *this;
- return total_size - (no_extended_dynamic_state != 0 ? 0 : sizeof(DynamicState));
+ if (xfb_enabled) {
+ // When transform feedback is enabled, use the whole struct
+ return sizeof(*this);
+ }
+ if (dynamic_vertex_input) {
+ // Exclude dynamic state and attributes
+ return offsetof(FixedPipelineState, attributes);
+ }
+ if (extended_dynamic_state) {
+ // Exclude dynamic state
+ return offsetof(FixedPipelineState, dynamic_state);
+ }
+ // Default
+ return offsetof(FixedPipelineState, xfb_state);
+ }
+
+ u32 DynamicAttributeType(size_t index) const noexcept {
+ return (attribute_types >> (index * 2)) & 0b11;
}
};
static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
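
Size() above lets the pipeline cache hash and compare only the prefix of the struct that is meaningful for the current device features; the static_assert guarantees there is no padding, so a byte-wise prefix hash is sound. A sketch of that idea under those assumptions (HashPrefix is illustrative, not the project's hasher):

#include <cstddef>
#include <functional>
#include <string_view>

template <typename Key>
std::size_t HashPrefix(const Key& key, std::size_t used_size) {
    // Valid only when used_size <= sizeof(Key) and Key has unique object
    // representations (no padding bytes with indeterminate values).
    const char* const data = reinterpret_cast<const char*>(&key);
    return std::hash<std::string_view>{}(std::string_view{data, used_size});
}

Note the member ordering was rearranged so that the excludable members (attributes, binding_divisors, dynamic_state, xfb_state) sit at the tail, which is what makes the offsetof-based truncation possible.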
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index f088447e9..68a23b602 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -157,7 +157,7 @@ struct FormatTuple {
{VK_FORMAT_R32_SFLOAT, Attachable | Storage}, // R32_FLOAT
{VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16_FLOAT
{VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16_UNORM
- {VK_FORMAT_UNDEFINED}, // R16_SNORM
+ {VK_FORMAT_R16_SNORM, Attachable | Storage}, // R16_SNORM
{VK_FORMAT_R16_UINT, Attachable | Storage}, // R16_UINT
{VK_FORMAT_UNDEFINED}, // R16_SINT
{VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // R16G16_UNORM
@@ -266,19 +266,20 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
}
-VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
+VkShaderStageFlagBits ShaderStage(Shader::Stage stage) {
switch (stage) {
- case Tegra::Engines::ShaderType::Vertex:
+ case Shader::Stage::VertexA:
+ case Shader::Stage::VertexB:
return VK_SHADER_STAGE_VERTEX_BIT;
- case Tegra::Engines::ShaderType::TesselationControl:
+ case Shader::Stage::TessellationControl:
return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
- case Tegra::Engines::ShaderType::TesselationEval:
+ case Shader::Stage::TessellationEval:
return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
- case Tegra::Engines::ShaderType::Geometry:
+ case Shader::Stage::Geometry:
return VK_SHADER_STAGE_GEOMETRY_BIT;
- case Tegra::Engines::ShaderType::Fragment:
+ case Shader::Stage::Fragment:
return VK_SHADER_STAGE_FRAGMENT_BIT;
- case Tegra::Engines::ShaderType::Compute:
+ case Shader::Stage::Compute:
return VK_SHADER_STAGE_COMPUTE_BIT;
}
UNIMPLEMENTED_MSG("Unimplemented shader stage={}", stage);
@@ -685,6 +686,19 @@ VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face) {
return {};
}
+VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode) {
+ switch (polygon_mode) {
+ case Maxwell::PolygonMode::Point:
+ return VK_POLYGON_MODE_POINT;
+ case Maxwell::PolygonMode::Line:
+ return VK_POLYGON_MODE_LINE;
+ case Maxwell::PolygonMode::Fill:
+ return VK_POLYGON_MODE_FILL;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented polygon mode={}", polygon_mode);
+ return {};
+}
+
VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
switch (swizzle) {
case Tegra::Texture::SwizzleSource::Zero:
@@ -741,4 +755,28 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
}
+VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
+ switch (msaa_mode) {
+ case Tegra::Texture::MsaaMode::Msaa1x1:
+ return VK_SAMPLE_COUNT_1_BIT;
+ case Tegra::Texture::MsaaMode::Msaa2x1:
+ case Tegra::Texture::MsaaMode::Msaa2x1_D3D:
+ return VK_SAMPLE_COUNT_2_BIT;
+ case Tegra::Texture::MsaaMode::Msaa2x2:
+ case Tegra::Texture::MsaaMode::Msaa2x2_VC4:
+ case Tegra::Texture::MsaaMode::Msaa2x2_VC12:
+ return VK_SAMPLE_COUNT_4_BIT;
+ case Tegra::Texture::MsaaMode::Msaa4x2:
+ case Tegra::Texture::MsaaMode::Msaa4x2_D3D:
+ case Tegra::Texture::MsaaMode::Msaa4x2_VC8:
+ case Tegra::Texture::MsaaMode::Msaa4x2_VC24:
+ return VK_SAMPLE_COUNT_8_BIT;
+ case Tegra::Texture::MsaaMode::Msaa4x4:
+ return VK_SAMPLE_COUNT_16_BIT;
+ default:
+ UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
+ return VK_SAMPLE_COUNT_1_BIT;
+ }
+}
+
} // namespace Vulkan::MaxwellToVK
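
MsaaMode() collapses the Tegra sample layouts onto the nearest Vulkan sample count (e.g. 2x2 becomes 4 samples). A hypothetical call site, not code from this commit, showing where the result typically lands when building a graphics pipeline:

VkPipelineMultisampleStateCreateInfo MakeMultisampleState(
    Tegra::Texture::MsaaMode msaa_mode) {
    return VkPipelineMultisampleStateCreateInfo{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .rasterizationSamples = MaxwellToVK::MsaaMode(msaa_mode),
        .sampleShadingEnable = VK_FALSE,
        .minSampleShading = 0.0f,
        .pSampleMask = nullptr,
        .alphaToCoverageEnable = VK_FALSE,
        .alphaToOneEnable = VK_FALSE,
    };
}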
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index e3e06ba38..8a9616039 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -5,6 +5,7 @@
#pragma once
#include "common/common_types.h"
+#include "shader_recompiler/stage.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
#include "video_core/textures/texture.h"
@@ -45,7 +46,7 @@ struct FormatInfo {
[[nodiscard]] FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with_srgb,
PixelFormat pixel_format);
-VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
+VkShaderStageFlagBits ShaderStage(Shader::Stage stage);
VkPrimitiveTopology PrimitiveTopology(const Device& device, Maxwell::PrimitiveTopology topology);
@@ -65,10 +66,14 @@ VkFrontFace FrontFace(Maxwell::FrontFace front_face);
VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face);
+VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode);
+
VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle);
VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reduction);
+VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode);
+
} // namespace Vulkan::MaxwellToVK
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h
new file mode 100644
index 000000000..4847db6b6
--- /dev/null
+++ b/src/video_core/renderer_vulkan/pipeline_helper.h
@@ -0,0 +1,154 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+
+#include <boost/container/small_vector.hpp>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "shader_recompiler/shader_info.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/texture_cache/texture_cache.h"
+#include "video_core/texture_cache/types.h"
+#include "video_core/textures/texture.h"
+#include "video_core/vulkan_common/vulkan_device.h"
+
+namespace Vulkan {
+
+class DescriptorLayoutBuilder {
+public:
+ DescriptorLayoutBuilder(const Device& device_) : device{&device_} {}
+
+ bool CanUsePushDescriptor() const noexcept {
+ return device->IsKhrPushDescriptorSupported() &&
+ num_descriptors <= device->MaxPushDescriptors();
+ }
+
+ vk::DescriptorSetLayout CreateDescriptorSetLayout(bool use_push_descriptor) const {
+ if (bindings.empty()) {
+ return nullptr;
+ }
+ const VkDescriptorSetLayoutCreateFlags flags =
+ use_push_descriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
+ return device->GetLogical().CreateDescriptorSetLayout({
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = flags,
+ .bindingCount = static_cast<u32>(bindings.size()),
+ .pBindings = bindings.data(),
+ });
+ }
+
+ vk::DescriptorUpdateTemplateKHR CreateTemplate(VkDescriptorSetLayout descriptor_set_layout,
+ VkPipelineLayout pipeline_layout,
+ bool use_push_descriptor) const {
+ if (entries.empty()) {
+ return nullptr;
+ }
+ const VkDescriptorUpdateTemplateType type =
+ use_push_descriptor ? VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR
+ : VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+ return device->GetLogical().CreateDescriptorUpdateTemplateKHR({
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+ .pNext = nullptr,
+ .flags = 0,
+ .descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
+ .pDescriptorUpdateEntries = entries.data(),
+ .templateType = type,
+ .descriptorSetLayout = descriptor_set_layout,
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .pipelineLayout = pipeline_layout,
+ .set = 0,
+ });
+ }
+
+ vk::PipelineLayout CreatePipelineLayout(VkDescriptorSetLayout descriptor_set_layout) const {
+ return device->GetLogical().CreatePipelineLayout({
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .setLayoutCount = descriptor_set_layout ? 1U : 0U,
+ .pSetLayouts = bindings.empty() ? nullptr : &descriptor_set_layout,
+ .pushConstantRangeCount = 0,
+ .pPushConstantRanges = nullptr,
+ });
+ }
+
+ void Add(const Shader::Info& info, VkShaderStageFlags stage) {
+ Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage, info.constant_buffer_descriptors);
+ Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage, info.storage_buffers_descriptors);
+ Add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, stage, info.texture_buffer_descriptors);
+ Add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, stage, info.image_buffer_descriptors);
+ Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage, info.texture_descriptors);
+ Add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, stage, info.image_descriptors);
+ }
+
+private:
+ template <typename Descriptors>
+ void Add(VkDescriptorType type, VkShaderStageFlags stage, const Descriptors& descriptors) {
+ const size_t num{descriptors.size()};
+ for (size_t i = 0; i < num; ++i) {
+ bindings.push_back({
+ .binding = binding,
+ .descriptorType = type,
+ .descriptorCount = descriptors[i].count,
+ .stageFlags = stage,
+ .pImmutableSamplers = nullptr,
+ });
+ entries.push_back({
+ .dstBinding = binding,
+ .dstArrayElement = 0,
+ .descriptorCount = descriptors[i].count,
+ .descriptorType = type,
+ .offset = offset,
+ .stride = sizeof(DescriptorUpdateEntry),
+ });
+ ++binding;
+ num_descriptors += descriptors[i].count;
+ offset += sizeof(DescriptorUpdateEntry);
+ }
+ }
+
+ const Device* device{};
+ boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
+ boost::container::small_vector<VkDescriptorUpdateTemplateEntryKHR, 32> entries;
+ u32 binding{};
+ u32 num_descriptors{};
+ size_t offset{};
+};
+
+inline void PushImageDescriptors(const Shader::Info& info, const VkSampler*& samplers,
+ const ImageId*& image_view_ids, TextureCache& texture_cache,
+ VKUpdateDescriptorQueue& update_descriptor_queue) {
+ for (const auto& desc : info.texture_buffer_descriptors) {
+ image_view_ids += desc.count;
+ }
+ for (const auto& desc : info.image_buffer_descriptors) {
+ image_view_ids += desc.count;
+ }
+ for (const auto& desc : info.texture_descriptors) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const VkSampler sampler{*(samplers++)};
+ ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
+ const VkImageView vk_image_view{image_view.Handle(desc.type)};
+ update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
+ }
+ }
+ for (const auto& desc : info.image_descriptors) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
+ if (desc.is_written) {
+ texture_cache.MarkModification(image_view.image_id);
+ }
+ const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
+ update_descriptor_queue.AddImage(vk_image_view);
+ }
+ }
+}
+
+} // namespace Vulkan
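
A hedged usage sketch of the new DescriptorLayoutBuilder (device and stage_infos are assumed inputs here; the pipeline classes added by this commit drive it along these lines):

DescriptorLayoutBuilder builder{device};
for (const Shader::Info* info : stage_infos) {
    builder.Add(*info, VK_SHADER_STAGE_ALL_GRAPHICS);
}
const bool use_push = builder.CanUsePushDescriptor();
vk::DescriptorSetLayout set_layout = builder.CreateDescriptorSetLayout(use_push);
vk::PipelineLayout pipeline_layout = builder.CreatePipelineLayout(*set_layout);
vk::DescriptorUpdateTemplateKHR update_template =
    builder.CreateTemplate(*set_layout, *pipeline_layout, use_push);

Bindings are numbered in the order Add() is called, and the template entries advance by sizeof(DescriptorUpdateEntry) per descriptor, matching the flat payload that VKUpdateDescriptorQueue produces.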
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index bec3a81d9..a8d04dc61 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -130,35 +130,45 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
if (!framebuffer) {
return;
}
- const auto& layout = render_window.GetFramebufferLayout();
- if (layout.width > 0 && layout.height > 0 && render_window.IsShown()) {
- const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
- const bool use_accelerated =
- rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
- const bool is_srgb = use_accelerated && screen_info.is_srgb;
- if (swapchain.HasFramebufferChanged(layout) || swapchain.GetSrgbState() != is_srgb) {
- swapchain.Create(layout.width, layout.height, is_srgb);
- blit_screen.Recreate();
- }
-
- scheduler.WaitWorker();
-
- while (!swapchain.AcquireNextImage()) {
- swapchain.Create(layout.width, layout.height, is_srgb);
- blit_screen.Recreate();
+ SCOPE_EXIT({ render_window.OnFrameDisplayed(); });
+ if (!render_window.IsShown()) {
+ return;
+ }
+ const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
+ const bool use_accelerated =
+ rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
+ const bool is_srgb = use_accelerated && screen_info.is_srgb;
+
+ bool has_been_recreated = false;
+ const auto recreate_swapchain = [&] {
+ if (!has_been_recreated) {
+ has_been_recreated = true;
+ scheduler.WaitWorker();
}
- const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
-
- scheduler.Flush(render_semaphore);
-
- if (swapchain.Present(render_semaphore)) {
- blit_screen.Recreate();
+ const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
+ swapchain.Create(layout.width, layout.height, is_srgb);
+ };
+ if (swapchain.IsSubOptimal() || swapchain.HasColorSpaceChanged(is_srgb)) {
+ recreate_swapchain();
+ }
+ bool is_outdated;
+ do {
+ swapchain.AcquireNextImage();
+ is_outdated = swapchain.IsOutDated();
+ if (is_outdated) {
+ recreate_swapchain();
}
- gpu.RendererFrameEndNotify();
- rasterizer.TickFrame();
+ } while (is_outdated);
+ if (has_been_recreated) {
+ blit_screen.Recreate();
}
+ const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
+ scheduler.Flush(render_semaphore);
+ scheduler.WaitWorker();
+ swapchain.Present(render_semaphore);
- render_window.OnFrameDisplayed();
+ gpu.RendererFrameEndNotify();
+ rasterizer.TickFrame();
}
void RendererVulkan::Report() const {
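
The rewritten SwapBuffers recreates the swapchain at most once per frame (guarded by has_been_recreated) and keeps reacquiring while the swapchain reports out-of-date. The underlying Vulkan contract, as a minimal sketch (RecreateSwapchain is an assumed helper, not part of this commit):

#include <vulkan/vulkan.h>

VkResult AcquireWithRecreate(VkDevice device, VkSwapchainKHR& swapchain,
                             VkSemaphore image_ready, uint32_t& image_index) {
    for (;;) {
        const VkResult result = vkAcquireNextImageKHR(
            device, swapchain, UINT64_MAX, image_ready, VK_NULL_HANDLE, &image_index);
        if (result != VK_ERROR_OUT_OF_DATE_KHR) {
            return result; // VK_SUCCESS or VK_SUBOPTIMAL_KHR land here
        }
        RecreateSwapchain(device, swapchain); // assumed helper
    }
}

VK_SUBOPTIMAL_KHR can still present successfully, which is why the code above treats it as a reason to recreate before acquiring rather than as a hard failure.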
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 363134129..516f428e7 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -184,47 +184,43 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.depth = 1,
},
};
- scheduler.Record(
- [buffer = *buffer, image = *raw_images[image_index], copy](vk::CommandBuffer cmdbuf) {
- const VkImageMemoryBarrier base_barrier{
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = 0,
- .dstAccessMask = 0,
- .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
- .newLayout = VK_IMAGE_LAYOUT_GENERAL,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = image,
- .subresourceRange =
- {
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .baseMipLevel = 0,
- .levelCount = 1,
- .baseArrayLayer = 0,
- .layerCount = 1,
- },
- };
- VkImageMemoryBarrier read_barrier = base_barrier;
- read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
- read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- VkImageMemoryBarrier write_barrier = base_barrier;
- write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
- write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
-
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
- 0, read_barrier);
- cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
- cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
- });
+ scheduler.Record([this, copy, image_index](vk::CommandBuffer cmdbuf) {
+ const VkImage image = *raw_images[image_index];
+ const VkImageMemoryBarrier base_barrier{
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = nullptr,
+ .srcAccessMask = 0,
+ .dstAccessMask = 0,
+ .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = image,
+ .subresourceRange{
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ };
+ VkImageMemoryBarrier read_barrier = base_barrier;
+ read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
+ read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkImageMemoryBarrier write_barrier = base_barrier;
+ write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
+ read_barrier);
+ cmdbuf.CopyBufferToImage(*buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
+ });
}
- scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
- descriptor_set = descriptor_sets[image_index], buffer = *buffer,
- size = swapchain.GetSize(), pipeline = *pipeline,
- layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([this, image_index, size = swapchain.GetSize()](vk::CommandBuffer cmdbuf) {
const f32 bg_red = Settings::values.bg_red.GetValue() / 255.0f;
const f32 bg_green = Settings::values.bg_green.GetValue() / 255.0f;
const f32 bg_blue = Settings::values.bg_blue.GetValue() / 255.0f;
@@ -234,8 +230,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
const VkRenderPassBeginInfo renderpass_bi{
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.pNext = nullptr,
- .renderPass = renderpass,
- .framebuffer = framebuffer,
+ .renderPass = *renderpass,
+ .framebuffer = *framebuffers[image_index],
.renderArea =
{
.offset = {0, 0},
@@ -257,12 +253,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.extent = size,
};
cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
- cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
cmdbuf.SetViewport(0, viewport);
cmdbuf.SetScissor(0, scissor);
- cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
+ cmdbuf.BindVertexBuffer(0, *buffer, offsetof(BufferData, vertices));
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
+ descriptor_sets[image_index], {});
cmdbuf.Draw(4, 1, 0, 0);
cmdbuf.EndRenderPass();
});
@@ -304,8 +301,7 @@ void VKBlitScreen::CreateShaders() {
void VKBlitScreen::CreateSemaphores() {
semaphores.resize(image_count);
- std::generate(semaphores.begin(), semaphores.end(),
- [this] { return device.GetLogical().CreateSemaphore(); });
+ std::ranges::generate(semaphores, [this] { return device.GetLogical().CreateSemaphore(); });
}
void VKBlitScreen::CreateDescriptorPool() {
@@ -633,8 +629,8 @@ void VKBlitScreen::CreateFramebuffers() {
}
void VKBlitScreen::ReleaseRawImages() {
- for (std::size_t i = 0; i < raw_images.size(); ++i) {
- scheduler.Wait(resource_ticks.at(i));
+ for (const u64 tick : resource_ticks) {
+ scheduler.Wait(tick);
}
raw_images.clear();
raw_buffer_commits.clear();
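
The read barrier above uses VK_IMAGE_LAYOUT_UNDEFINED as the old layout because the copy fully overwrites the image, letting the driver discard the previous contents instead of preserving them. The same transition, isolated as a sketch (fields mirror the barrier in the hunk):

VkImageMemoryBarrier DiscardToGeneral(VkImage image) {
    return VkImageMemoryBarrier{
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .pNext = nullptr,
        .srcAccessMask = VK_ACCESS_HOST_WRITE_BIT,
        .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, // previous contents discarded
        .newLayout = VK_IMAGE_LAYOUT_GENERAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange{
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };
}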
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 0def1e769..f4b3ee95c 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -60,38 +60,74 @@ std::array<T, 6> MakeQuadIndices(u32 quad, u32 first) {
}
return indices;
}
-} // Anonymous namespace
-
-Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
- : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(null_params) {}
-Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
- VAddr cpu_addr_, u64 size_bytes_)
- : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_) {
- buffer = runtime.device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+vk::Buffer CreateBuffer(const Device& device, u64 size) {
+ VkBufferUsageFlags flags =
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ if (device.IsExtTransformFeedbackSupported()) {
+ flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
+ }
+ return device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .size = SizeBytes(),
- .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
- VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
- VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
- VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
- VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
+ .size = size,
+ .usage = flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
+}
+} // Anonymous namespace
+
+Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
+ : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(null_params) {}
+
+Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
+ VAddr cpu_addr_, u64 size_bytes_)
+ : VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_),
+ device{&runtime.device}, buffer{CreateBuffer(*device, SizeBytes())},
+ commit{runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal)} {
if (runtime.device.HasDebuggingToolAttached()) {
buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str());
}
- commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
+}
+
+VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format) {
+ if (!device) {
+ // Null buffer, return a null descriptor
+ return VK_NULL_HANDLE;
+ }
+ const auto it{std::ranges::find_if(views, [offset, size, format](const BufferView& view) {
+ return offset == view.offset && size == view.size && format == view.format;
+ })};
+ if (it != views.end()) {
+ return *it->handle;
+ }
+ views.push_back({
+ .offset = offset,
+ .size = size,
+ .format = format,
+ .handle = device->GetLogical().CreateBufferView({
+ .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .buffer = *buffer,
+ .format = MaxwellToVK::SurfaceFormat(*device, FormatType::Buffer, false, format).format,
+ .offset = offset,
+ .range = size,
+ }),
+ });
+ return *views.back().handle;
}
BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
- VKDescriptorPool& descriptor_pool)
+ DescriptorPool& descriptor_pool)
: device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
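
Buffer::View() above is a linear find-or-create cache: the number of views per buffer stays small, so a vector scan is typically cheaper than a map. The generic shape of that pattern, as a sketch with placeholder Entry/Key types:

#include <algorithm>
#include <utility>
#include <vector>

template <typename Entry, typename Key, typename Factory>
Entry& FindOrEmplace(std::vector<Entry>& cache, const Key& key, Factory&& make) {
    const auto it = std::ranges::find_if(
        cache, [&key](const Entry& entry) { return entry.key == key; });
    if (it != cache.end()) {
        return *it;
    }
    return cache.emplace_back(std::forward<Factory>(make)(key));
}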
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3bb81d5b3..c27402ff0 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -9,13 +9,14 @@
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
-class VKDescriptorPool;
+class DescriptorPool;
class VKScheduler;
class BufferCacheRuntime;
@@ -26,6 +27,8 @@ public:
explicit Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
VAddr cpu_addr_, u64 size_bytes_);
+ [[nodiscard]] VkBufferView View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format);
+
[[nodiscard]] VkBuffer Handle() const noexcept {
return *buffer;
}
@@ -35,8 +38,17 @@ public:
}
private:
+ struct BufferView {
+ u32 offset;
+ u32 size;
+ VideoCore::Surface::PixelFormat format;
+ vk::BufferView handle;
+ };
+
+ const Device* device{};
vk::Buffer buffer;
MemoryCommit commit;
+ std::vector<BufferView> views;
};
class BufferCacheRuntime {
@@ -49,7 +61,7 @@ public:
explicit BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_manager_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
- VKDescriptorPool& descriptor_pool);
+ DescriptorPool& descriptor_pool);
void Finish();
@@ -87,6 +99,11 @@ public:
BindBuffer(buffer, offset, size);
}
+ void BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
+ VideoCore::Surface::PixelFormat format) {
+ update_descriptor_queue.AddTexelBuffer(buffer.View(offset, size, format));
+ }
+
private:
void BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
update_descriptor_queue.AddBuffer(buffer, offset, size);
@@ -124,6 +141,7 @@ struct BufferCacheParams {
static constexpr bool NEEDS_BIND_UNIFORM_INDEX = false;
static constexpr bool NEEDS_BIND_STORAGE_INDEX = false;
static constexpr bool USE_MEMORY_MAPS = true;
+ static constexpr bool SEPARATE_IMAGE_BUFFER_BINDINGS = false;
};
using BufferCache = VideoCommon::BufferCache<BufferCacheParams>;
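
SEPARATE_IMAGE_BUFFER_BINDINGS joins the other BufferCacheParams flags as a compile-time policy consumed by the shared VideoCommon::BufferCache template. A rough sketch of how such a traits flag typically gates behavior (illustrative only; the real template is more involved):

template <class P>
class BufferCacheSketch {
public:
    void BindImageBuffer() {
        if constexpr (P::SEPARATE_IMAGE_BUFFER_BINDINGS) {
            // Backend distinguishes image buffers from texture buffers.
        } else {
            // Vulkan path: texel buffer views cover both bindings.
        }
    }
};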
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 4181d83ee..8e426ce2c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -41,80 +41,92 @@ constexpr u32 ASTC_BINDING_SWIZZLE_BUFFER = 2;
constexpr u32 ASTC_BINDING_OUTPUT_IMAGE = 3;
constexpr size_t ASTC_NUM_BINDINGS = 4;
-VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
- return {
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .offset = 0,
- .size = static_cast<u32>(size),
- };
-}
-
-std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
- return {{
- {
- .binding = 0,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- {
- .binding = 1,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- }};
-}
+template <size_t size>
+inline constexpr VkPushConstantRange COMPUTE_PUSH_CONSTANT_RANGE{
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .offset = 0,
+ .size = static_cast<u32>(size),
+};
-std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> BuildASTCDescriptorSetBindings() {
- return {{
- {
- .binding = ASTC_BINDING_INPUT_BUFFER,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- {
- .binding = ASTC_BINDING_ENC_BUFFER,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- {
- .binding = ASTC_BINDING_SWIZZLE_BUFFER,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- {
- .binding = ASTC_BINDING_OUTPUT_IMAGE,
- .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- },
- }};
-}
+constexpr std::array<VkDescriptorSetLayoutBinding, 2> INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS{{
+ {
+ .binding = 0,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+}};
+
+constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{
+ .uniform_buffers = 0,
+ .storage_buffers = 2,
+ .texture_buffers = 0,
+ .image_buffers = 0,
+ .textures = 0,
+ .images = 0,
+ .score = 2,
+};
-VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
- return {
- .dstBinding = 0,
- .dstArrayElement = 0,
- .descriptorCount = 2,
+constexpr std::array<VkDescriptorSetLayoutBinding, 4> ASTC_DESCRIPTOR_SET_BINDINGS{{
+ {
+ .binding = ASTC_BINDING_INPUT_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
- .offset = 0,
- .stride = sizeof(DescriptorUpdateEntry),
- };
-}
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_ENC_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_SWIZZLE_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_OUTPUT_IMAGE,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+}};
+
+constexpr DescriptorBankInfo ASTC_BANK_INFO{
+ .uniform_buffers = 0,
+ .storage_buffers = 3,
+ .texture_buffers = 0,
+ .image_buffers = 0,
+ .textures = 0,
+ .images = 1,
+ .score = 4,
+};
-std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
-BuildASTCPassDescriptorUpdateTemplateEntry() {
- return {{
+constexpr VkDescriptorUpdateTemplateEntryKHR INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE{
+ .dstBinding = 0,
+ .dstArrayElement = 0,
+ .descriptorCount = 2,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = 0,
+ .stride = sizeof(DescriptorUpdateEntry),
+};
+
+constexpr std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
+ ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY{{
{
.dstBinding = ASTC_BINDING_INPUT_BUFFER,
.dstArrayElement = 0,
@@ -148,7 +160,6 @@ BuildASTCPassDescriptorUpdateTemplateEntry() {
.stride = sizeof(DescriptorUpdateEntry),
},
}};
-}
struct AstcPushConstants {
std::array<u32, 2> blocks_dims;
@@ -159,14 +170,14 @@ struct AstcPushConstants {
u32 block_height;
u32 block_height_mask;
};
-
} // Anonymous namespace
-VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
- vk::Span<VkDescriptorSetLayoutBinding> bindings,
- vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
- vk::Span<VkPushConstantRange> push_constants,
- std::span<const u32> code) {
+ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
+ vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+ const DescriptorBankInfo& bank_info,
+ vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code)
+ : device{device_} {
descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@@ -196,8 +207,7 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.pipelineLayout = *layout,
.set = 0,
});
-
- descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
+ descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, bank_info);
}
module = device.GetLogical().CreateShaderModule({
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
@@ -206,43 +216,34 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.codeSize = static_cast<u32>(code.size_bytes()),
.pCode = code.data(),
});
+ device.SaveShader(code);
pipeline = device.GetLogical().CreateComputePipeline({
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .stage =
- {
- .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .stage = VK_SHADER_STAGE_COMPUTE_BIT,
- .module = *module,
- .pName = "main",
- .pSpecializationInfo = nullptr,
- },
+ .stage{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+ .module = *module,
+ .pName = "main",
+ .pSpecializationInfo = nullptr,
+ },
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
});
}
-VKComputePass::~VKComputePass() = default;
+ComputePass::~ComputePass() = default;
-VkDescriptorSet VKComputePass::CommitDescriptorSet(
- VKUpdateDescriptorQueue& update_descriptor_queue) {
- if (!descriptor_template) {
- return nullptr;
- }
- const VkDescriptorSet set = descriptor_allocator->Commit();
- update_descriptor_queue.Send(*descriptor_template, set);
- return set;
-}
-
-Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
+Uint8Pass::Uint8Pass(const Device& device_, VKScheduler& scheduler_,
+ DescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
- : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
- BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
+ : ComputePass(device_, descriptor_pool, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
+ INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, {},
+ VULKAN_UINT8_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}
@@ -256,11 +257,11 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
- const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ const VkBuffer buffer{staging.buffer};
scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
- num_vertices](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([this, buffer, descriptor_data, num_vertices](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -268,8 +269,10 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
- cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+ const VkDescriptorSet set = descriptor_allocator.Commit();
+ device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, WRITE_BARRIER);
@@ -278,12 +281,12 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
}
QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
+ DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
- : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
- BuildInputOutputDescriptorUpdateTemplate(),
- BuildComputePushConstantRange(sizeof(u32) * 2), VULKAN_QUAD_INDEXED_COMP_SPV),
+ : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
+ INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
+ COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 2>, VULKAN_QUAD_INDEXED_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}
@@ -313,11 +316,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
- const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
- num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([this, buffer = staging.buffer, descriptor_data, num_tri_vertices, base_vertex,
+ index_shift](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -325,10 +328,12 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
- const std::array push_constants = {base_vertex, index_shift};
- cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
- cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
+ const std::array push_constants{base_vertex, index_shift};
+ const VkDescriptorSet set = descriptor_allocator.Commit();
+ device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
+ cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
&push_constants);
cmdbuf.Dispatch(Common::DivCeil(num_tri_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
@@ -338,15 +343,14 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
}
ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
+ DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
MemoryAllocator& memory_allocator_)
- : VKComputePass(device_, descriptor_pool_, BuildASTCDescriptorSetBindings(),
- BuildASTCPassDescriptorUpdateTemplateEntry(),
- BuildComputePushConstantRange(sizeof(AstcPushConstants)),
- ASTC_DECODER_COMP_SPV),
- device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
+ : ComputePass(device_, descriptor_pool_, ASTC_DESCRIPTOR_SET_BINDINGS,
+ ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY, ASTC_BANK_INFO,
+ COMPUTE_PUSH_CONSTANT_RANGE<sizeof(AstcPushConstants)>, ASTC_DECODER_COMP_SPV),
+ scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
ASTCDecoderPass::~ASTCDecoderPass() = default;
@@ -444,16 +448,14 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
update_descriptor_queue.AddBuffer(*data_buffer, sizeof(ASTC_ENCODINGS_VALUES),
sizeof(SWIZZLE_TABLE));
update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
-
- const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
- const VkPipelineLayout vk_layout = *layout;
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
// To unswizzle the ASTC data
const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
ASSERT(params.origin == (std::array<u32, 3>{0, 0, 0}));
ASSERT(params.destination == (std::array<s32, 3>{0, 0, 0}));
- scheduler.Record([vk_layout, num_dispatches_x, num_dispatches_y, num_dispatches_z,
- block_dims, params, set](vk::CommandBuffer cmdbuf) {
+ scheduler.Record([this, num_dispatches_x, num_dispatches_y, num_dispatches_z, block_dims,
+ params, descriptor_data](vk::CommandBuffer cmdbuf) {
const AstcPushConstants uniforms{
.blocks_dims = block_dims,
.bytes_per_block_log2 = params.bytes_per_block_log2,
@@ -463,8 +465,10 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
.block_height = params.block_height,
.block_height_mask = params.block_height_mask,
};
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, vk_layout, 0, set, {});
- cmdbuf.PushConstants(vk_layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
+ const VkDescriptorSet set = descriptor_allocator.Commit();
+ device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
+ cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, num_dispatches_z);
});
}
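The vk_compute_pass.cpp hunks above all share one pattern: the old CommitDescriptorSet() helper is gone, and each pass instead captures the stable pointer returned by update_descriptor_queue.UpdateData(), then allocates and writes the descriptor set inside the lambda handed to scheduler.Record(). A self-contained model of that deferral, with std::function standing in for the scheduler and plain ints for descriptor payloads (all names here are illustrative, not yuzu API):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Staging storage that stays valid until the recorded commands run,
    // mirroring how UpdateData() hands out a pointer into the update queue.
    struct UpdateQueue {
        std::vector<int> payload;
        const int* UpdateData() const { return payload.data(); }
    };

    int main() {
        UpdateQueue queue{.payload = {7, 8, 9}};
        const int* const descriptor_data = queue.UpdateData();

        std::vector<std::function<void()>> recorded; // stands in for scheduler.Record
        recorded.push_back([descriptor_data] {
            // The "descriptor set" is built only now, at record/submit time.
            std::printf("writing descriptors: %d %d %d\n", descriptor_data[0],
                        descriptor_data[1], descriptor_data[2]);
        });

        for (const auto& command : recorded) {
            command(); // "submit"
        }
    }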
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 5ea187c30..114aef2bd 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -4,7 +4,6 @@
#pragma once
-#include <optional>
#include <span>
#include <utility>
@@ -27,31 +26,31 @@ class VKUpdateDescriptorQueue;
class Image;
struct StagingBufferRef;
-class VKComputePass {
+class ComputePass {
public:
- explicit VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
- vk::Span<VkDescriptorSetLayoutBinding> bindings,
- vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
- vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
- ~VKComputePass();
+ explicit ComputePass(const Device& device, DescriptorPool& descriptor_pool,
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
+ vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+ const DescriptorBankInfo& bank_info,
+ vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
+ ~ComputePass();
protected:
- VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
-
+ const Device& device;
vk::DescriptorUpdateTemplateKHR descriptor_template;
vk::PipelineLayout layout;
vk::Pipeline pipeline;
+ vk::DescriptorSetLayout descriptor_set_layout;
+ DescriptorAllocator descriptor_allocator;
private:
- vk::DescriptorSetLayout descriptor_set_layout;
- std::optional<DescriptorAllocator> descriptor_allocator;
vk::ShaderModule module;
};
-class Uint8Pass final : public VKComputePass {
+class Uint8Pass final : public ComputePass {
public:
explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
+ DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_);
~Uint8Pass();
@@ -66,10 +65,10 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
};
-class QuadIndexedPass final : public VKComputePass {
+class QuadIndexedPass final : public ComputePass {
public:
explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
+ DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_);
~QuadIndexedPass();
@@ -84,10 +83,10 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
};
-class ASTCDecoderPass final : public VKComputePass {
+class ASTCDecoderPass final : public ComputePass {
public:
explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
+ DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
MemoryAllocator& memory_allocator_);
@@ -99,7 +98,6 @@ public:
private:
void MakeDataBuffer();
- const Device& device;
VKScheduler& scheduler;
StagingBufferPool& staging_buffer_pool;
VKUpdateDescriptorQueue& update_descriptor_queue;
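The ComputePass constructor now takes precomputed constants (INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS, COMPUTE_PUSH_CONSTANT_RANGE&lt;size&gt;, the bank infos) where the old code called Build*() helpers at run time. Their definitions live in vk_compute_pass.cpp rather than in this header; a variable template along the following lines reproduces the push-constant case (a sketch under that assumption, not the verbatim source):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vulkan/vulkan.h>

    // One compile-time VkPushConstantRange per payload size.
    template <std::size_t size>
    inline constexpr VkPushConstantRange COMPUTE_PUSH_CONSTANT_RANGE{
        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
        .offset = 0,
        .size = static_cast<std::uint32_t>(size),
    };

    // Mirrors the QuadIndexedPass constructor argument above.
    inline constexpr VkPushConstantRange QUAD_RANGE =
        COMPUTE_PUSH_CONSTANT_RANGE<sizeof(std::uint32_t) * 2>;

    int main() {
        std::printf("range size = %u\n", QUAD_RANGE.size);
    }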
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 3a48219b7..70b84c7a6 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,152 +2,198 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
#include <vector>
+#include <boost/container/small_vector.hpp>
+
+#include "video_core/renderer_vulkan/pipeline_helper.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
-VKComputePipeline::VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
- VKUpdateDescriptorQueue& update_descriptor_queue_,
- const SPIRVShader& shader_)
- : device{device_}, scheduler{scheduler_}, entries{shader_.entries},
- descriptor_set_layout{CreateDescriptorSetLayout()},
- descriptor_allocator{descriptor_pool_, *descriptor_set_layout},
- update_descriptor_queue{update_descriptor_queue_}, layout{CreatePipelineLayout()},
- descriptor_template{CreateDescriptorUpdateTemplate()},
- shader_module{CreateShaderModule(shader_.code)}, pipeline{CreatePipeline()} {}
-
-VKComputePipeline::~VKComputePipeline() = default;
-
-VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
- if (!descriptor_template) {
- return {};
- }
- const VkDescriptorSet set = descriptor_allocator.Commit();
- update_descriptor_queue.Send(*descriptor_template, set);
- return set;
-}
-
-vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
- std::vector<VkDescriptorSetLayoutBinding> bindings;
- u32 binding = 0;
- const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
- // TODO(Rodrigo): Maybe make individual bindings here?
- for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
- bindings.push_back({
- .binding = binding++,
- .descriptorType = descriptor_type,
- .descriptorCount = 1,
- .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
- .pImmutableSamplers = nullptr,
- });
- }
- };
- add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
- add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
- add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
-
- return device.GetLogical().CreateDescriptorSetLayout({
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .bindingCount = static_cast<u32>(bindings.size()),
- .pBindings = bindings.data(),
- });
-}
-
-vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
- return device.GetLogical().CreatePipelineLayout({
- .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .setLayoutCount = 1,
- .pSetLayouts = descriptor_set_layout.address(),
- .pushConstantRangeCount = 0,
- .pPushConstantRanges = nullptr,
- });
-}
-
-vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
- std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
- u32 binding = 0;
- u32 offset = 0;
- FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
- if (template_entries.empty()) {
- // If the shader doesn't use descriptor sets, skip template creation.
- return {};
+using Shader::ImageBufferDescriptor;
+using Tegra::Texture::TexturePair;
+
+ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ Common::ThreadWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
+ vk::ShaderModule spv_module_)
+ : device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
+ spv_module(std::move(spv_module_)) {
+ if (shader_notify) {
+ shader_notify->MarkShaderBuilding();
}
-
- return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
- .pNext = nullptr,
- .flags = 0,
- .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
- .pDescriptorUpdateEntries = template_entries.data(),
- .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
- .descriptorSetLayout = *descriptor_set_layout,
- .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
- .pipelineLayout = *layout,
- .set = DESCRIPTOR_SET,
- });
-}
-
-vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
- device.SaveShader(code);
-
- return device.GetLogical().CreateShaderModule({
- .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .codeSize = code.size() * sizeof(u32),
- .pCode = code.data(),
- });
-}
-
-vk::Pipeline VKComputePipeline::CreatePipeline() const {
-
- VkComputePipelineCreateInfo ci{
- .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .stage =
- {
+ std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
+ uniform_buffer_sizes.begin());
+
+ auto func{[this, &descriptor_pool, shader_notify] {
+ DescriptorLayoutBuilder builder{device};
+ builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
+
+ descriptor_set_layout = builder.CreateDescriptorSetLayout(false);
+ pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
+ descriptor_update_template =
+ builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout, false);
+ descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ .pNext = nullptr,
+ .requiredSubgroupSize = GuestWarpSize,
+ };
+ pipeline = device.GetLogical().CreateComputePipeline({
+ .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
- .pNext = nullptr,
+ .pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
- .module = *shader_module,
+ .module = *spv_module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
- .layout = *layout,
- .basePipelineHandle = nullptr,
- .basePipelineIndex = 0,
- };
-
- const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
- .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- .pNext = nullptr,
- .requiredSubgroupSize = GuestWarpSize,
- };
-
- if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
- ci.stage.pNext = &subgroup_size_ci;
+ .layout = *pipeline_layout,
+        .basePipelineHandle = nullptr,
+ .basePipelineIndex = 0,
+ });
+ std::lock_guard lock{build_mutex};
+ is_built = true;
+ build_condvar.notify_one();
+ if (shader_notify) {
+ shader_notify->MarkShaderComplete();
+ }
+ }};
+ if (thread_worker) {
+ thread_worker->QueueWork(std::move(func));
+ } else {
+ func();
+ }
+}
+
+void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
+ Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
+ BufferCache& buffer_cache, TextureCache& texture_cache) {
+ update_descriptor_queue.Acquire();
+
+ buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
+ buffer_cache.UnbindComputeStorageBuffers();
+ size_t ssbo_index{};
+ for (const auto& desc : info.storage_buffers_descriptors) {
+ ASSERT(desc.count == 1);
+ buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
+ desc.is_written);
+ ++ssbo_index;
}
- return device.GetLogical().CreateComputePipeline(ci);
+ texture_cache.SynchronizeComputeDescriptors();
+
+ static constexpr size_t max_elements = 64;
+ std::array<ImageId, max_elements> image_view_ids;
+ boost::container::static_vector<u32, max_elements> image_view_indices;
+ boost::container::static_vector<VkSampler, max_elements> samplers;
+
+ const auto& qmd{kepler_compute.launch_description};
+ const auto& cbufs{qmd.const_buffer_config};
+ const bool via_header_index{qmd.linked_tsc != 0};
+ const auto read_handle{[&](const auto& desc, u32 index) {
+ ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
+ const u32 index_offset{index << desc.size_shift};
+ const u32 offset{desc.cbuf_offset + index_offset};
+ const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
+ if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
+ std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
+ if (desc.has_secondary) {
+ ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
+ const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
+ const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
+ secondary_offset};
+ const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
+ const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ return TexturePair(lhs_raw | rhs_raw, via_header_index);
+ }
+ }
+ return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ }};
+ const auto add_image{[&](const auto& desc) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices.push_back(handle.first);
+ }
+ }};
+ std::ranges::for_each(info.texture_buffer_descriptors, add_image);
+ std::ranges::for_each(info.image_buffer_descriptors, add_image);
+ for (const auto& desc : info.texture_descriptors) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices.push_back(handle.first);
+
+ Sampler* const sampler = texture_cache.GetComputeSampler(handle.second);
+ samplers.push_back(sampler->Handle());
+ }
+ }
+ std::ranges::for_each(info.image_descriptors, add_image);
+
+ const std::span indices_span(image_view_indices.data(), image_view_indices.size());
+ texture_cache.FillComputeImageViews(indices_span, image_view_ids);
+
+ buffer_cache.UnbindComputeTextureBuffers();
+ ImageId* texture_buffer_ids{image_view_ids.data()};
+ size_t index{};
+ const auto add_buffer{[&](const auto& desc) {
+ constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
+ for (u32 i = 0; i < desc.count; ++i) {
+ bool is_written{false};
+ if constexpr (is_image) {
+ is_written = desc.is_written;
+ }
+ ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
+ buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
+ image_view.BufferSize(), image_view.format,
+ is_written, is_image);
+ ++texture_buffer_ids;
+ ++index;
+ }
+ }};
+ std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
+ std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
+
+ buffer_cache.UpdateComputeBuffers();
+ buffer_cache.BindHostComputeBuffers();
+
+ const VkSampler* samplers_it{samplers.data()};
+ const ImageId* views_it{image_view_ids.data()};
+ PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);
+
+ if (!is_built.load(std::memory_order::relaxed)) {
+ // Wait for the pipeline to be built
+ scheduler.Record([this](vk::CommandBuffer) {
+ std::unique_lock lock{build_mutex};
+ build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
+ });
+ }
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+ if (!descriptor_set_layout) {
+ return;
+ }
+ const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+ const vk::Device& dev{device.GetLogical()};
+ dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
+ descriptor_set, nullptr);
+ });
}
} // namespace Vulkan
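ComputePipeline splits construction in two: the constructor queues the expensive vkCreateComputePipelines call on an optional Common::ThreadWorker, and Configure() blocks on build_condvar only if a dispatch arrives before the build finishes. Reduced to the synchronization alone (std::thread standing in for the worker, and the recorded-lambda indirection collapsed into a direct call), the pattern is:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Pipeline {
        std::mutex build_mutex;
        std::condition_variable build_condvar;
        std::atomic_bool is_built{false};

        void Build() { // worker thread: the expensive pipeline creation would go here
            std::lock_guard lock{build_mutex};
            is_built = true;
            build_condvar.notify_one();
        }

        void Configure() { // render thread: cheap check first, blocking wait only if needed
            if (!is_built.load(std::memory_order::relaxed)) {
                std::unique_lock lock{build_mutex};
                build_condvar.wait(
                    lock, [this] { return is_built.load(std::memory_order::relaxed); });
            }
            // Safe to bind the pipeline from this point on.
        }
    };

    int main() {
        Pipeline pipeline;
        std::thread worker{&Pipeline::Build, &pipeline};
        pipeline.Configure(); // blocks until Build() flips is_built
        worker.join();
    }

In the diff the wait is itself recorded as a command, so an unfinished build only stalls execution of the command stream rather than the thread that keeps queueing work.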
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 7e16575ac..52fec04d3 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,61 +4,63 @@
#pragma once
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+
#include "common/common_types.h"
+#include "common/thread_worker.h"
+#include "shader_recompiler/shader_info.h"
+#include "video_core/memory_manager.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
+namespace VideoCore {
+class ShaderNotify;
+}
+
namespace Vulkan {
class Device;
class VKScheduler;
-class VKUpdateDescriptorQueue;
-class VKComputePipeline final {
+class ComputePipeline {
public:
- explicit VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
- VKUpdateDescriptorQueue& update_descriptor_queue_,
- const SPIRVShader& shader_);
- ~VKComputePipeline();
-
- VkDescriptorSet CommitDescriptorSet();
+ explicit ComputePipeline(const Device& device, DescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue,
+ Common::ThreadWorker* thread_worker,
+ VideoCore::ShaderNotify* shader_notify, const Shader::Info& info,
+ vk::ShaderModule spv_module);
- VkPipeline GetHandle() const {
- return *pipeline;
- }
+ ComputePipeline& operator=(ComputePipeline&&) noexcept = delete;
+ ComputePipeline(ComputePipeline&&) noexcept = delete;
- VkPipelineLayout GetLayout() const {
- return *layout;
- }
+ ComputePipeline& operator=(const ComputePipeline&) = delete;
+ ComputePipeline(const ComputePipeline&) = delete;
- const ShaderEntries& GetEntries() const {
- return entries;
- }
+ void Configure(Tegra::Engines::KeplerCompute& kepler_compute, Tegra::MemoryManager& gpu_memory,
+ VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);
private:
- vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
-
- vk::PipelineLayout CreatePipelineLayout() const;
-
- vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
-
- vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
-
- vk::Pipeline CreatePipeline() const;
-
const Device& device;
- VKScheduler& scheduler;
- ShaderEntries entries;
+ VKUpdateDescriptorQueue& update_descriptor_queue;
+ Shader::Info info;
+ VideoCommon::ComputeUniformBufferSizes uniform_buffer_sizes{};
+
+ vk::ShaderModule spv_module;
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
- VKUpdateDescriptorQueue& update_descriptor_queue;
- vk::PipelineLayout layout;
- vk::DescriptorUpdateTemplateKHR descriptor_template;
- vk::ShaderModule shader_module;
+ vk::PipelineLayout pipeline_layout;
+ vk::DescriptorUpdateTemplateKHR descriptor_update_template;
vk::Pipeline pipeline;
+
+ std::condition_variable build_condvar;
+ std::mutex build_mutex;
+ std::atomic_bool is_built{false};
};
} // namespace Vulkan
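Configure() in the .cpp above reads each texture handle word from guest constant buffer memory and, when desc.has_secondary is set, ORs in a second word fetched from another buffer before wrapping the result in TexturePair. The combination itself is plain bitwise arithmetic (illustrative values below; the actual bit layout comes from the handle encoding, which is not shown in this diff):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Two raw 32-bit words read from separate const buffers (made-up values).
        const std::uint32_t lhs_raw = 0x00001234; // low bits populated by one buffer
        const std::uint32_t rhs_raw = 0x00560000; // high bits populated by the other
        const std::uint32_t handle = lhs_raw | rhs_raw;
        std::printf("combined handle = 0x%08x\n", handle);
    }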
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index ef9fb5910..8e77e4796 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <mutex>
+#include <span>
#include <vector>
#include "common/common_types.h"
@@ -13,79 +15,149 @@
namespace Vulkan {
-// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines.
-constexpr std::size_t SETS_GROW_RATE = 0x20;
+// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines
+constexpr size_t SETS_GROW_RATE = 16;
+constexpr s32 SCORE_THRESHOLD = 3;
+constexpr u32 SETS_PER_POOL = 64;
-DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool_,
- VkDescriptorSetLayout layout_)
- : ResourcePool(descriptor_pool_.master_semaphore, SETS_GROW_RATE),
- descriptor_pool{descriptor_pool_}, layout{layout_} {}
+struct DescriptorBank {
+ DescriptorBankInfo info;
+ std::vector<vk::DescriptorPool> pools;
+};
-DescriptorAllocator::~DescriptorAllocator() = default;
+bool DescriptorBankInfo::IsSuperset(const DescriptorBankInfo& subset) const noexcept {
+ return uniform_buffers >= subset.uniform_buffers && storage_buffers >= subset.storage_buffers &&
+ texture_buffers >= subset.texture_buffers && image_buffers >= subset.image_buffers &&
+           textures >= subset.textures && images >= subset.images;
+}
-VkDescriptorSet DescriptorAllocator::Commit() {
- const std::size_t index = CommitResource();
- return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
+template <typename Descriptors>
+static u32 Accumulate(const Descriptors& descriptors) {
+ u32 count = 0;
+ for (const auto& descriptor : descriptors) {
+ count += descriptor.count;
+ }
+ return count;
}
-void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
- descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
+static DescriptorBankInfo MakeBankInfo(std::span<const Shader::Info> infos) {
+ DescriptorBankInfo bank;
+ for (const Shader::Info& info : infos) {
+ bank.uniform_buffers += Accumulate(info.constant_buffer_descriptors);
+ bank.storage_buffers += Accumulate(info.storage_buffers_descriptors);
+ bank.texture_buffers += Accumulate(info.texture_buffer_descriptors);
+ bank.image_buffers += Accumulate(info.image_buffer_descriptors);
+ bank.textures += Accumulate(info.texture_descriptors);
+ bank.images += Accumulate(info.image_descriptors);
+ }
+ bank.score = bank.uniform_buffers + bank.storage_buffers + bank.texture_buffers +
+ bank.image_buffers + bank.textures + bank.images;
+ return bank;
}
-VKDescriptorPool::VKDescriptorPool(const Device& device_, VKScheduler& scheduler)
- : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
- AllocateNewPool()} {}
-
-VKDescriptorPool::~VKDescriptorPool() = default;
-
-vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
- static constexpr u32 num_sets = 0x20000;
- static constexpr VkDescriptorPoolSize pool_sizes[] = {
- {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
- {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
- {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
- {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
- {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
- {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
+static void AllocatePool(const Device& device, DescriptorBank& bank) {
+ std::array<VkDescriptorPoolSize, 6> pool_sizes;
+ size_t pool_cursor{};
+ const auto add = [&](VkDescriptorType type, u32 count) {
+ if (count > 0) {
+ pool_sizes[pool_cursor++] = {
+ .type = type,
+ .descriptorCount = count * SETS_PER_POOL,
+ };
+ }
};
-
- const VkDescriptorPoolCreateInfo ci{
+ const auto& info{bank.info};
+ add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, info.uniform_buffers);
+ add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, info.storage_buffers);
+ add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, info.texture_buffers);
+ add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, info.image_buffers);
+ add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, info.textures);
+ add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, info.images);
+ bank.pools.push_back(device.GetLogical().CreateDescriptorPool({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
- .maxSets = num_sets,
- .poolSizeCount = static_cast<u32>(std::size(pool_sizes)),
+ .maxSets = SETS_PER_POOL,
+ .poolSizeCount = static_cast<u32>(pool_cursor),
.pPoolSizes = std::data(pool_sizes),
- };
- return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
+ }));
+}
+
+DescriptorAllocator::DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
+ DescriptorBank& bank_, VkDescriptorSetLayout layout_)
+ : ResourcePool(master_semaphore_, SETS_GROW_RATE), device{&device_}, bank{&bank_},
+ layout{layout_} {}
+
+VkDescriptorSet DescriptorAllocator::Commit() {
+ const size_t index = CommitResource();
+ return sets[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
}
-vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
- std::size_t count) {
- const std::vector layout_copies(count, layout);
- VkDescriptorSetAllocateInfo ai{
+void DescriptorAllocator::Allocate(size_t begin, size_t end) {
+ sets.push_back(AllocateDescriptors(end - begin));
+}
+
+vk::DescriptorSets DescriptorAllocator::AllocateDescriptors(size_t count) {
+ const std::vector<VkDescriptorSetLayout> layouts(count, layout);
+ VkDescriptorSetAllocateInfo allocate_info{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
- .descriptorPool = **active_pool,
+ .descriptorPool = *bank->pools.back(),
.descriptorSetCount = static_cast<u32>(count),
- .pSetLayouts = layout_copies.data(),
+ .pSetLayouts = layouts.data(),
};
-
- vk::DescriptorSets sets = active_pool->Allocate(ai);
- if (!sets.IsOutOfPoolMemory()) {
- return sets;
+ vk::DescriptorSets new_sets = bank->pools.back().Allocate(allocate_info);
+ if (!new_sets.IsOutOfPoolMemory()) {
+ return new_sets;
}
-
// Our current pool is out of memory. Allocate a new one and retry
- active_pool = AllocateNewPool();
- ai.descriptorPool = **active_pool;
- sets = active_pool->Allocate(ai);
- if (!sets.IsOutOfPoolMemory()) {
- return sets;
+ AllocatePool(*device, *bank);
+ allocate_info.descriptorPool = *bank->pools.back();
+ new_sets = bank->pools.back().Allocate(allocate_info);
+ if (!new_sets.IsOutOfPoolMemory()) {
+ return new_sets;
}
-
// After allocating a new pool, we are out of memory again. We can't handle this from here.
throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
}
+DescriptorPool::DescriptorPool(const Device& device_, VKScheduler& scheduler)
+ : device{device_}, master_semaphore{scheduler.GetMasterSemaphore()} {}
+
+DescriptorPool::~DescriptorPool() = default;
+
+DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
+ std::span<const Shader::Info> infos) {
+ return Allocator(layout, MakeBankInfo(infos));
+}
+
+DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
+ const Shader::Info& info) {
+ return Allocator(layout, MakeBankInfo(std::array{info}));
+}
+
+DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
+ const DescriptorBankInfo& info) {
+ return DescriptorAllocator(device, master_semaphore, Bank(info), layout);
+}
+
+DescriptorBank& DescriptorPool::Bank(const DescriptorBankInfo& reqs) {
+ std::shared_lock read_lock{banks_mutex};
+ const auto it = std::ranges::find_if(bank_infos, [&reqs](const DescriptorBankInfo& bank) {
+ return std::abs(bank.score - reqs.score) < SCORE_THRESHOLD && bank.IsSuperset(reqs);
+ });
+ if (it != bank_infos.end()) {
+        return *banks[std::distance(bank_infos.begin(), it)];
+ }
+ read_lock.unlock();
+
+ std::unique_lock write_lock{banks_mutex};
+ bank_infos.push_back(reqs);
+
+ auto& bank = *banks.emplace_back(std::make_unique<DescriptorBank>());
+ bank.info = reqs;
+ AllocatePool(device, bank);
+ return bank;
+}
+
} // namespace Vulkan
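Bank() reuses an existing descriptor bank only when the candidate's total score is within SCORE_THRESHOLD of the request and the candidate is a superset in every descriptor category, so pipelines with similar shapes share pools without over-allocating. A self-contained model of that match, with the categories cut down to two for brevity:

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    struct BankInfo {
        int uniform_buffers{};
        int textures{};
        int score{}; // total descriptor count

        bool IsSuperset(const BankInfo& subset) const {
            return uniform_buffers >= subset.uniform_buffers && textures >= subset.textures;
        }
    };

    constexpr int SCORE_THRESHOLD = 3;

    const BankInfo* FindBank(const std::vector<BankInfo>& banks, const BankInfo& reqs) {
        for (const BankInfo& bank : banks) {
            if (std::abs(bank.score - reqs.score) < SCORE_THRESHOLD && bank.IsSuperset(reqs)) {
                return &bank; // close enough in size, large enough in every category
            }
        }
        return nullptr; // the caller would allocate a fresh bank here
    }

    int main() {
        std::vector<BankInfo> banks{{.uniform_buffers = 4, .textures = 2, .score = 6}};
        const BankInfo reqs{.uniform_buffers = 3, .textures = 2, .score = 5};
        std::printf("reused: %s\n", FindBank(banks, reqs) ? "yes" : "no");
    }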
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index f892be7be..59466aac5 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -4,57 +4,85 @@
#pragma once
+#include <shared_mutex>
+#include <span>
#include <vector>
+#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
-class VKDescriptorPool;
class VKScheduler;
+struct DescriptorBank;
+
+struct DescriptorBankInfo {
+ [[nodiscard]] bool IsSuperset(const DescriptorBankInfo& subset) const noexcept;
+
+ u32 uniform_buffers{}; ///< Number of uniform buffer descriptors
+ u32 storage_buffers{}; ///< Number of storage buffer descriptors
+ u32 texture_buffers{}; ///< Number of texture buffer descriptors
+ u32 image_buffers{}; ///< Number of image buffer descriptors
+ u32 textures{}; ///< Number of texture descriptors
+ u32 images{}; ///< Number of image descriptors
+ s32 score{}; ///< Number of descriptors in total
+};
+
class DescriptorAllocator final : public ResourcePool {
+ friend class DescriptorPool;
+
public:
- explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
- ~DescriptorAllocator() override;
+ explicit DescriptorAllocator() = default;
+ ~DescriptorAllocator() override = default;
+
+ DescriptorAllocator& operator=(DescriptorAllocator&&) noexcept = default;
+ DescriptorAllocator(DescriptorAllocator&&) noexcept = default;
DescriptorAllocator& operator=(const DescriptorAllocator&) = delete;
DescriptorAllocator(const DescriptorAllocator&) = delete;
VkDescriptorSet Commit();
-protected:
- void Allocate(std::size_t begin, std::size_t end) override;
-
private:
- VKDescriptorPool& descriptor_pool;
- const VkDescriptorSetLayout layout;
+ explicit DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
+ DescriptorBank& bank_, VkDescriptorSetLayout layout_);
- std::vector<vk::DescriptorSets> descriptors_allocations;
-};
+ void Allocate(size_t begin, size_t end) override;
+
+ vk::DescriptorSets AllocateDescriptors(size_t count);
+
+ const Device* device{};
+ DescriptorBank* bank{};
+ VkDescriptorSetLayout layout{};
-class VKDescriptorPool final {
- friend DescriptorAllocator;
+ std::vector<vk::DescriptorSets> sets;
+};
+class DescriptorPool {
public:
- explicit VKDescriptorPool(const Device& device, VKScheduler& scheduler);
- ~VKDescriptorPool();
+ explicit DescriptorPool(const Device& device, VKScheduler& scheduler);
+ ~DescriptorPool();
- VKDescriptorPool(const VKDescriptorPool&) = delete;
- VKDescriptorPool& operator=(const VKDescriptorPool&) = delete;
+ DescriptorPool& operator=(const DescriptorPool&) = delete;
+ DescriptorPool(const DescriptorPool&) = delete;
-private:
- vk::DescriptorPool* AllocateNewPool();
+ DescriptorAllocator Allocator(VkDescriptorSetLayout layout,
+ std::span<const Shader::Info> infos);
+ DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const Shader::Info& info);
+ DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const DescriptorBankInfo& info);
- vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
+private:
+ DescriptorBank& Bank(const DescriptorBankInfo& reqs);
const Device& device;
MasterSemaphore& master_semaphore;
- std::vector<vk::DescriptorPool> pools;
- vk::DescriptorPool* active_pool;
+ std::shared_mutex banks_mutex;
+ std::vector<DescriptorBankInfo> bank_infos;
+ std::vector<std::unique_ptr<DescriptorBank>> banks;
};
} // namespace Vulkan
\ No newline at end of file
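The banks_mutex above is a std::shared_mutex: lookups take a shared lock, and exclusive access is acquired only when a new bank must be created. A minimal version of that read-mostly pattern, with the same caveat as the diff: between dropping the shared lock and taking the unique lock another writer may insert an equivalent entry, and the scheme tolerates that duplicate rather than re-searching:

    #include <mutex>
    #include <shared_mutex>
    #include <vector>

    class Registry {
    public:
        int GetOrCreate(int key) {
            {
                std::shared_lock read_lock{mutex}; // many readers may search in parallel
                for (const int value : values) {
                    if (value == key) {
                        return value;
                    }
                }
            }
            std::unique_lock write_lock{mutex}; // exclusive only on the creation path
            values.push_back(key); // a racing duplicate is wasteful but harmless here
            return key;
        }

    private:
        std::shared_mutex mutex;
        std::vector<int> values;
    };

    int main() {
        Registry registry;
        return registry.GetOrCreate(42) == 42 ? 0 : 1;
    }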
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index fc6dd83eb..18482e1d0 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -1,29 +1,58 @@
-// Copyright 2019 yuzu Emulator Project
+// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
-#include <array>
-#include <cstring>
-#include <vector>
+#include <span>
-#include "common/common_types.h"
-#include "common/microprofile.h"
-#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include <boost/container/small_vector.hpp>
+#include <boost/container/static_vector.hpp>
+
+#include "common/bit_field.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
-#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/pipeline_helper.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
-#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
-#include "video_core/vulkan_common/vulkan_wrapper.h"
-
-namespace Vulkan {
-MICROPROFILE_DECLARE(Vulkan_PipelineCache);
+#if defined(_MSC_VER) && defined(NDEBUG)
+#define LAMBDA_FORCEINLINE [[msvc::forceinline]]
+#else
+#define LAMBDA_FORCEINLINE
+#endif
+namespace Vulkan {
namespace {
+using boost::container::small_vector;
+using boost::container::static_vector;
+using Shader::ImageBufferDescriptor;
+using Tegra::Texture::TexturePair;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::PixelFormatFromDepthFormat;
+using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
+
+constexpr size_t NUM_STAGES = Maxwell::MaxShaderStage;
+constexpr size_t MAX_IMAGE_ELEMENTS = 64;
+
+DescriptorLayoutBuilder MakeBuilder(const Device& device, std::span<const Shader::Info> infos) {
+ DescriptorLayoutBuilder builder{device};
+ for (size_t index = 0; index < infos.size(); ++index) {
+ static constexpr std::array stages{
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_GEOMETRY_BIT,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ };
+ builder.Add(infos[index], stages.at(index));
+ }
+ return builder;
+}
template <class StencilFace>
VkStencilOpState GetStencilFaceState(const StencilFace& face) {
@@ -39,15 +68,24 @@ VkStencilOpState GetStencilFaceState(const StencilFace& face) {
}
bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
- static constexpr std::array unsupported_topologies = {
+ static constexpr std::array unsupported_topologies{
VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
- VK_PRIMITIVE_TOPOLOGY_PATCH_LIST};
- return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies),
- topology) == std::end(unsupported_topologies);
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+ // VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT,
+ };
+ return std::ranges::find(unsupported_topologies, topology) == unsupported_topologies.end();
+}
+
+bool IsLine(VkPrimitiveTopology topology) {
+ static constexpr std::array line_topologies{
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST, VK_PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ // VK_PRIMITIVE_TOPOLOGY_LINE_LOOP_EXT,
+ };
+    return std::ranges::find(line_topologies, topology) != line_topologies.end();
}
VkViewportSwizzleNV UnpackViewportSwizzle(u16 swizzle) {
@@ -59,8 +97,7 @@ VkViewportSwizzleNV UnpackViewportSwizzle(u16 swizzle) {
BitField<12, 3, Maxwell::ViewportSwizzle> w;
};
const Swizzle unpacked{swizzle};
-
- return {
+ return VkViewportSwizzleNV{
.x = MaxwellToVK::ViewportSwizzle(unpacked.x),
.y = MaxwellToVK::ViewportSwizzle(unpacked.y),
.z = MaxwellToVK::ViewportSwizzle(unpacked.z),
@@ -68,193 +105,446 @@ VkViewportSwizzleNV UnpackViewportSwizzle(u16 swizzle) {
};
}
-VkSampleCountFlagBits ConvertMsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
- switch (msaa_mode) {
- case Tegra::Texture::MsaaMode::Msaa1x1:
- return VK_SAMPLE_COUNT_1_BIT;
- case Tegra::Texture::MsaaMode::Msaa2x1:
- case Tegra::Texture::MsaaMode::Msaa2x1_D3D:
- return VK_SAMPLE_COUNT_2_BIT;
- case Tegra::Texture::MsaaMode::Msaa2x2:
- case Tegra::Texture::MsaaMode::Msaa2x2_VC4:
- case Tegra::Texture::MsaaMode::Msaa2x2_VC12:
- return VK_SAMPLE_COUNT_4_BIT;
- case Tegra::Texture::MsaaMode::Msaa4x2:
- case Tegra::Texture::MsaaMode::Msaa4x2_D3D:
- case Tegra::Texture::MsaaMode::Msaa4x2_VC8:
- case Tegra::Texture::MsaaMode::Msaa4x2_VC24:
- return VK_SAMPLE_COUNT_8_BIT;
- case Tegra::Texture::MsaaMode::Msaa4x4:
- return VK_SAMPLE_COUNT_16_BIT;
- default:
- UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
- return VK_SAMPLE_COUNT_1_BIT;
+PixelFormat DecodeFormat(u8 encoded_format) {
+ const auto format{static_cast<Tegra::RenderTargetFormat>(encoded_format)};
+ if (format == Tegra::RenderTargetFormat::NONE) {
+ return PixelFormat::Invalid;
}
+ return PixelFormatFromRenderTargetFormat(format);
}
-} // Anonymous namespace
+RenderPassKey MakeRenderPassKey(const FixedPipelineState& state) {
+ RenderPassKey key;
+ std::ranges::transform(state.color_formats, key.color_formats.begin(), DecodeFormat);
+ if (state.depth_enabled != 0) {
+ const auto depth_format{static_cast<Tegra::DepthFormat>(state.depth_format.Value())};
+ key.depth_format = PixelFormatFromDepthFormat(depth_format);
+ } else {
+ key.depth_format = PixelFormat::Invalid;
+ }
+ key.samples = MaxwellToVK::MsaaMode(state.msaa_mode);
+ return key;
+}
-VKGraphicsPipeline::VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool_,
- VKUpdateDescriptorQueue& update_descriptor_queue_,
- const GraphicsPipelineCacheKey& key,
- vk::Span<VkDescriptorSetLayoutBinding> bindings,
- const SPIRVProgram& program, u32 num_color_buffers)
- : device{device_}, scheduler{scheduler_}, cache_key{key}, hash{cache_key.Hash()},
- descriptor_set_layout{CreateDescriptorSetLayout(bindings)},
- descriptor_allocator{descriptor_pool_, *descriptor_set_layout},
- update_descriptor_queue{update_descriptor_queue_}, layout{CreatePipelineLayout()},
- descriptor_template{CreateDescriptorUpdateTemplate(program)},
- modules(CreateShaderModules(program)),
- pipeline(CreatePipeline(program, cache_key.renderpass, num_color_buffers)) {}
-
-VKGraphicsPipeline::~VKGraphicsPipeline() = default;
-
-VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
- if (!descriptor_template) {
- return {};
- }
- const VkDescriptorSet set = descriptor_allocator.Commit();
- update_descriptor_queue.Send(*descriptor_template, set);
- return set;
+size_t NumAttachments(const FixedPipelineState& state) {
+ size_t num{};
+ for (size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+ const auto format{static_cast<Tegra::RenderTargetFormat>(state.color_formats[index])};
+ if (format != Tegra::RenderTargetFormat::NONE) {
+ num = index + 1;
+ }
+ }
+ return num;
}
-vk::DescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
- vk::Span<VkDescriptorSetLayoutBinding> bindings) const {
- const VkDescriptorSetLayoutCreateInfo ci{
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .bindingCount = bindings.size(),
- .pBindings = bindings.data(),
- };
- return device.GetLogical().CreateDescriptorSetLayout(ci);
+template <typename Spec>
+bool Passes(const std::array<vk::ShaderModule, NUM_STAGES>& modules,
+ const std::array<Shader::Info, NUM_STAGES>& stage_infos) {
+ for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
+ if (!Spec::enabled_stages[stage] && modules[stage]) {
+ return false;
+ }
+ const auto& info{stage_infos[stage]};
+ if constexpr (!Spec::has_storage_buffers) {
+ if (!info.storage_buffers_descriptors.empty()) {
+ return false;
+ }
+ }
+ if constexpr (!Spec::has_texture_buffers) {
+ if (!info.texture_buffer_descriptors.empty()) {
+ return false;
+ }
+ }
+ if constexpr (!Spec::has_image_buffers) {
+ if (!info.image_buffer_descriptors.empty()) {
+ return false;
+ }
+ }
+ if constexpr (!Spec::has_images) {
+ if (!info.image_descriptors.empty()) {
+ return false;
+ }
+ }
+ }
+ return true;
}
-vk::PipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
- const VkPipelineLayoutCreateInfo ci{
- .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .setLayoutCount = 1,
- .pSetLayouts = descriptor_set_layout.address(),
- .pushConstantRangeCount = 0,
- .pPushConstantRanges = nullptr,
- };
- return device.GetLogical().CreatePipelineLayout(ci);
+using ConfigureFuncPtr = void (*)(GraphicsPipeline*, bool);
+
+template <typename Spec, typename... Specs>
+ConfigureFuncPtr FindSpec(const std::array<vk::ShaderModule, NUM_STAGES>& modules,
+ const std::array<Shader::Info, NUM_STAGES>& stage_infos) {
+ if constexpr (sizeof...(Specs) > 0) {
+ if (!Passes<Spec>(modules, stage_infos)) {
+ return FindSpec<Specs...>(modules, stage_infos);
+ }
+ }
+ return GraphicsPipeline::MakeConfigureSpecFunc<Spec>();
}
-vk::DescriptorUpdateTemplateKHR VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
- const SPIRVProgram& program) const {
- std::vector<VkDescriptorUpdateTemplateEntry> template_entries;
- u32 binding = 0;
- u32 offset = 0;
- for (const auto& stage : program) {
- if (stage) {
- FillDescriptorUpdateTemplateEntries(stage->entries, binding, offset, template_entries);
+struct SimpleVertexFragmentSpec {
+ static constexpr std::array<bool, 5> enabled_stages{true, false, false, false, true};
+ static constexpr bool has_storage_buffers = false;
+ static constexpr bool has_texture_buffers = false;
+ static constexpr bool has_image_buffers = false;
+ static constexpr bool has_images = false;
+};
+
+struct SimpleVertexSpec {
+ static constexpr std::array<bool, 5> enabled_stages{true, false, false, false, false};
+ static constexpr bool has_storage_buffers = false;
+ static constexpr bool has_texture_buffers = false;
+ static constexpr bool has_image_buffers = false;
+ static constexpr bool has_images = false;
+};
+
+struct DefaultSpec {
+ static constexpr std::array<bool, 5> enabled_stages{true, true, true, true, true};
+ static constexpr bool has_storage_buffers = true;
+ static constexpr bool has_texture_buffers = true;
+ static constexpr bool has_image_buffers = true;
+ static constexpr bool has_images = true;
+};
+
+ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& modules,
+ const std::array<Shader::Info, NUM_STAGES>& infos) {
+ return FindSpec<SimpleVertexSpec, SimpleVertexFragmentSpec, DefaultSpec>(modules, infos);
+}
+} // Anonymous namespace
+
+GraphicsPipeline::GraphicsPipeline(
+ Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_,
+ VKScheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
+ VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
+ RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key_,
+ std::array<vk::ShaderModule, NUM_STAGES> stages,
+ const std::array<const Shader::Info*, NUM_STAGES>& infos)
+ : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
+ texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
+ update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
+ if (shader_notify) {
+ shader_notify->MarkShaderBuilding();
+ }
+ for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
+ const Shader::Info* const info{infos[stage]};
+ if (!info) {
+ continue;
}
+ stage_infos[stage] = *info;
+ enabled_uniform_buffer_masks[stage] = info->constant_buffer_mask;
+ std::ranges::copy(info->constant_buffer_used_sizes, uniform_buffer_sizes[stage].begin());
}
- if (template_entries.empty()) {
- // If the shader doesn't use descriptor sets, skip template creation.
- return {};
+ auto func{[this, shader_notify, &render_pass_cache, &descriptor_pool] {
+ DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
+ uses_push_descriptor = builder.CanUsePushDescriptor();
+ descriptor_set_layout = builder.CreateDescriptorSetLayout(uses_push_descriptor);
+ if (!uses_push_descriptor) {
+ descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, stage_infos);
+ }
+ const VkDescriptorSetLayout set_layout{*descriptor_set_layout};
+ pipeline_layout = builder.CreatePipelineLayout(set_layout);
+ descriptor_update_template =
+ builder.CreateTemplate(set_layout, *pipeline_layout, uses_push_descriptor);
+
+ const VkRenderPass render_pass{render_pass_cache.Get(MakeRenderPassKey(key.state))};
+ Validate();
+ MakePipeline(render_pass);
+
+ std::lock_guard lock{build_mutex};
+ is_built = true;
+ build_condvar.notify_one();
+ if (shader_notify) {
+ shader_notify->MarkShaderComplete();
+ }
+ }};
+ if (worker_thread) {
+ worker_thread->QueueWork(std::move(func));
+ } else {
+ func();
}
+ configure_func = ConfigureFunc(spv_modules, stage_infos);
+}
- const VkDescriptorUpdateTemplateCreateInfoKHR ci{
- .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
- .pNext = nullptr,
- .flags = 0,
- .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
- .pDescriptorUpdateEntries = template_entries.data(),
- .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
- .descriptorSetLayout = *descriptor_set_layout,
- .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
- .pipelineLayout = *layout,
- .set = DESCRIPTOR_SET,
- };
- return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
+void GraphicsPipeline::AddTransition(GraphicsPipeline* transition) {
+ transition_keys.push_back(transition->key);
+ transitions.push_back(transition);
}
-std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
- const SPIRVProgram& program) const {
- VkShaderModuleCreateInfo ci{
- .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .codeSize = 0,
- .pCode = nullptr,
- };
+template <typename Spec>
+void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
+ std::array<ImageId, MAX_IMAGE_ELEMENTS> image_view_ids;
+ std::array<u32, MAX_IMAGE_ELEMENTS> image_view_indices;
+ std::array<VkSampler, MAX_IMAGE_ELEMENTS> samplers;
+ size_t sampler_index{};
+ size_t image_index{};
+
+ texture_cache.SynchronizeGraphicsDescriptors();
+
+ buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
+
+ const auto& regs{maxwell3d.regs};
+ const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
+ const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
+ const Shader::Info& info{stage_infos[stage]};
+ buffer_cache.UnbindGraphicsStorageBuffers(stage);
+ if constexpr (Spec::has_storage_buffers) {
+ size_t ssbo_index{};
+ for (const auto& desc : info.storage_buffers_descriptors) {
+ ASSERT(desc.count == 1);
+ buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, desc.cbuf_index,
+ desc.cbuf_offset, desc.is_written);
+ ++ssbo_index;
+ }
+ }
+ const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+ const auto read_handle{[&](const auto& desc, u32 index) {
+ ASSERT(cbufs[desc.cbuf_index].enabled);
+ const u32 index_offset{index << desc.size_shift};
+ const u32 offset{desc.cbuf_offset + index_offset};
+ const GPUVAddr addr{cbufs[desc.cbuf_index].address + offset};
+ if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
+ std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
+ if (desc.has_secondary) {
+ ASSERT(cbufs[desc.secondary_cbuf_index].enabled);
+ const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
+ const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
+ second_offset};
+ const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
+ const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+ const u32 raw{lhs_raw | rhs_raw};
+ return TexturePair(raw, via_header_index);
+ }
+ }
+ return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+ }};
+ const auto add_image{[&](const auto& desc) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices[image_index++] = handle.first;
+ }
+ }};
+ if constexpr (Spec::has_texture_buffers) {
+ for (const auto& desc : info.texture_buffer_descriptors) {
+ add_image(desc);
+ }
+ }
+ if constexpr (Spec::has_image_buffers) {
+ for (const auto& desc : info.image_buffer_descriptors) {
+ add_image(desc);
+ }
+ }
+ for (const auto& desc : info.texture_descriptors) {
+ for (u32 index = 0; index < desc.count; ++index) {
+ const auto handle{read_handle(desc, index)};
+ image_view_indices[image_index++] = handle.first;
- std::vector<vk::ShaderModule> shader_modules;
- shader_modules.reserve(Maxwell::MaxShaderStage);
- for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) {
- const auto& stage = program[i];
- if (!stage) {
- continue;
+ Sampler* const sampler{texture_cache.GetGraphicsSampler(handle.second)};
+ samplers[sampler_index++] = sampler->Handle();
+ }
+ }
+ if constexpr (Spec::has_images) {
+ for (const auto& desc : info.image_descriptors) {
+ add_image(desc);
+ }
}
+ }};
+ if constexpr (Spec::enabled_stages[0]) {
+ config_stage(0);
+ }
+ if constexpr (Spec::enabled_stages[1]) {
+ config_stage(1);
+ }
+ if constexpr (Spec::enabled_stages[2]) {
+ config_stage(2);
+ }
+ if constexpr (Spec::enabled_stages[3]) {
+ config_stage(3);
+ }
+ if constexpr (Spec::enabled_stages[4]) {
+ config_stage(4);
+ }
+ const std::span indices_span(image_view_indices.data(), image_index);
+ texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
+
+ ImageId* texture_buffer_index{image_view_ids.data()};
+ const auto bind_stage_info{[&](size_t stage) LAMBDA_FORCEINLINE {
+ size_t index{};
+ const auto add_buffer{[&](const auto& desc) {
+ constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
+ for (u32 i = 0; i < desc.count; ++i) {
+ bool is_written{false};
+ if constexpr (is_image) {
+ is_written = desc.is_written;
+ }
+ ImageView& image_view{texture_cache.GetImageView(*texture_buffer_index)};
+ buffer_cache.BindGraphicsTextureBuffer(stage, index, image_view.GpuAddr(),
+ image_view.BufferSize(), image_view.format,
+ is_written, is_image);
+ ++index;
+ ++texture_buffer_index;
+ }
+ }};
+ buffer_cache.UnbindGraphicsTextureBuffers(stage);
- device.SaveShader(stage->code);
+ const Shader::Info& info{stage_infos[stage]};
+ if constexpr (Spec::has_texture_buffers) {
+ for (const auto& desc : info.texture_buffer_descriptors) {
+ add_buffer(desc);
+ }
+ }
+ if constexpr (Spec::has_image_buffers) {
+ for (const auto& desc : info.image_buffer_descriptors) {
+ add_buffer(desc);
+ }
+ }
+ for (const auto& desc : info.texture_descriptors) {
+ texture_buffer_index += desc.count;
+ }
+ if constexpr (Spec::has_images) {
+ for (const auto& desc : info.image_descriptors) {
+ texture_buffer_index += desc.count;
+ }
+ }
+ }};
+ if constexpr (Spec::enabled_stages[0]) {
+ bind_stage_info(0);
+ }
+ if constexpr (Spec::enabled_stages[1]) {
+ bind_stage_info(1);
+ }
+ if constexpr (Spec::enabled_stages[2]) {
+ bind_stage_info(2);
+ }
+ if constexpr (Spec::enabled_stages[3]) {
+ bind_stage_info(3);
+ }
+ if constexpr (Spec::enabled_stages[4]) {
+ bind_stage_info(4);
+ }
+
+ buffer_cache.UpdateGraphicsBuffers(is_indexed);
+ buffer_cache.BindHostGeometryBuffers(is_indexed);
- ci.codeSize = stage->code.size() * sizeof(u32);
- ci.pCode = stage->code.data();
- shader_modules.push_back(device.GetLogical().CreateShaderModule(ci));
+ update_descriptor_queue.Acquire();
+
+ const VkSampler* samplers_it{samplers.data()};
+ const ImageId* views_it{image_view_ids.data()};
+ const auto prepare_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
+ buffer_cache.BindHostStageBuffers(stage);
+ PushImageDescriptors(stage_infos[stage], samplers_it, views_it, texture_cache,
+ update_descriptor_queue);
+ }};
+ if constexpr (Spec::enabled_stages[0]) {
+ prepare_stage(0);
+ }
+ if constexpr (Spec::enabled_stages[1]) {
+ prepare_stage(1);
}
- return shader_modules;
+ if constexpr (Spec::enabled_stages[2]) {
+ prepare_stage(2);
+ }
+ if constexpr (Spec::enabled_stages[3]) {
+ prepare_stage(3);
+ }
+ if constexpr (Spec::enabled_stages[4]) {
+ prepare_stage(4);
+ }
+ ConfigureDraw();
}
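
ConfigureImpl is instantiated once per Spec, so the repeated `if constexpr (Spec::enabled_stages[N])` blocks above are a manually unrolled loop that the compiler prunes per specialization. A minimal sketch of the pattern, with a hypothetical ExampleSpec standing in for the generated specialization constants (neither name is part of this patch):

    #include <array>
    #include <cstddef>

    // Hypothetical stand-in for a generated Spec type; only the member used by
    // the dispatch is shown.
    struct ExampleSpec {
        static constexpr std::array<bool, 5> enabled_stages{true, false, false, false, true};
    };

    template <typename Spec, typename F>
    void ForEachEnabledStage(F&& func) {
        // if constexpr drops disabled stages at compile time, so each
        // specialization only contains code for the stages it actually uses.
        if constexpr (Spec::enabled_stages[0]) { func(std::size_t{0}); }
        if constexpr (Spec::enabled_stages[1]) { func(std::size_t{1}); }
        if constexpr (Spec::enabled_stages[2]) { func(std::size_t{2}); }
        if constexpr (Spec::enabled_stages[3]) { func(std::size_t{3}); }
        if constexpr (Spec::enabled_stages[4]) { func(std::size_t{4}); }
    }
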
-vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
- VkRenderPass renderpass,
- u32 num_color_buffers) const {
- const auto& state = cache_key.fixed_state;
- const auto& viewport_swizzles = state.viewport_swizzles;
-
- FixedPipelineState::DynamicState dynamic;
- if (device.IsExtExtendedDynamicStateSupported()) {
- // Insert dummy values, as long as they are valid they don't matter as extended dynamic
- // state is ignored
- dynamic.raw1 = 0;
- dynamic.raw2 = 0;
- dynamic.vertex_strides.fill(0);
- } else {
- dynamic = state.dynamic_state;
- }
-
- std::vector<VkVertexInputBindingDescription> vertex_bindings;
- std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
- for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
- const bool instanced = state.binding_divisors[index] != 0;
- const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
- vertex_bindings.push_back({
- .binding = static_cast<u32>(index),
- .stride = dynamic.vertex_strides[index],
- .inputRate = rate,
+void GraphicsPipeline::ConfigureDraw() {
+ texture_cache.UpdateRenderTargets(false);
+ scheduler.RequestRenderpass(texture_cache.GetFramebuffer());
+
+ if (!is_built.load(std::memory_order::relaxed)) {
+ // Wait for the pipeline to be built
+ scheduler.Record([this](vk::CommandBuffer) {
+ std::unique_lock lock{build_mutex};
+ build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
});
- if (instanced) {
- vertex_binding_divisors.push_back({
- .binding = static_cast<u32>(index),
- .divisor = state.binding_divisors[index],
- });
- }
}
+ const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)};
+ const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ scheduler.Record([this, descriptor_data, bind_pipeline](vk::CommandBuffer cmdbuf) {
+ if (bind_pipeline) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+ }
+ if (!descriptor_set_layout) {
+ return;
+ }
+ if (uses_push_descriptor) {
+ cmdbuf.PushDescriptorSetWithTemplateKHR(*descriptor_update_template, *pipeline_layout,
+ 0, descriptor_data);
+ } else {
+ const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+ const vk::Device& dev{device.GetLogical()};
+ dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
+ descriptor_set, nullptr);
+ }
+ });
+}
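
The wait recorded above blocks on build_condvar until is_built flips. A sketch of the plausible builder-side counterpart, assuming the members declared in the header below; NotifyBuilt is illustrative and the actual signaling code is outside this hunk:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Illustrative producer side: set the flag under the mutex, then wake waiters.
    void NotifyBuilt(std::mutex& build_mutex, std::condition_variable& build_condvar,
                     std::atomic_bool& is_built) {
        {
            std::scoped_lock lock{build_mutex};
            is_built.store(true, std::memory_order::relaxed);
        }
        build_condvar.notify_all();
    }
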
- std::vector<VkVertexInputAttributeDescription> vertex_attributes;
- const auto& input_attributes = program[0]->entries.attributes;
- for (std::size_t index = 0; index < state.attributes.size(); ++index) {
- const auto& attribute = state.attributes[index];
- if (!attribute.enabled) {
- continue;
+void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
+ FixedPipelineState::DynamicState dynamic{};
+ if (!key.state.extended_dynamic_state) {
+ dynamic = key.state.dynamic_state;
+ }
+ static_vector<VkVertexInputBindingDescription, 32> vertex_bindings;
+ static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
+ static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
+ if (key.state.dynamic_vertex_input) {
+ for (size_t index = 0; index < key.state.attributes.size(); ++index) {
+ const u32 type = key.state.DynamicAttributeType(index);
+ if (!stage_infos[0].loads.Generic(index) || type == 0) {
+ continue;
+ }
+ vertex_attributes.push_back({
+ .location = static_cast<u32>(index),
+ .binding = 0,
+ .format = type == 1   ? VK_FORMAT_R32_SFLOAT
+           : type == 2 ? VK_FORMAT_R32_SINT
+                       : VK_FORMAT_R32_UINT,
+ .offset = 0,
+ });
}
- if (!input_attributes.contains(static_cast<u32>(index))) {
- // Skip attributes not used by the vertex shaders.
- continue;
+ if (!vertex_attributes.empty()) {
+ vertex_bindings.push_back({
+ .binding = 0,
+ .stride = 4,
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
+ });
+ }
+ } else {
+ for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
+ const bool instanced = key.state.binding_divisors[index] != 0;
+ const auto rate =
+ instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
+ vertex_bindings.push_back({
+ .binding = static_cast<u32>(index),
+ .stride = dynamic.vertex_strides[index],
+ .inputRate = rate,
+ });
+ if (instanced) {
+ vertex_binding_divisors.push_back({
+ .binding = static_cast<u32>(index),
+ .divisor = key.state.binding_divisors[index],
+ });
+ }
+ }
+ for (size_t index = 0; index < key.state.attributes.size(); ++index) {
+ const auto& attribute = key.state.attributes[index];
+ if (!attribute.enabled || !stage_infos[0].loads.Generic(index)) {
+ continue;
+ }
+ vertex_attributes.push_back({
+ .location = static_cast<u32>(index),
+ .binding = attribute.buffer,
+ .format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size()),
+ .offset = attribute.offset,
+ });
}
- vertex_attributes.push_back({
- .location = static_cast<u32>(index),
- .binding = attribute.buffer,
- .format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size()),
- .offset = attribute.offset,
- });
}
-
VkPipelineVertexInputStateCreateInfo vertex_input_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pNext = nullptr,
@@ -264,7 +554,6 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
.vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size()),
.pVertexAttributeDescriptions = vertex_attributes.data(),
};
-
const VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
@@ -274,78 +563,113 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
if (!vertex_binding_divisors.empty()) {
vertex_input_ci.pNext = &input_divisor_ci;
}
-
- const auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, state.topology);
+ auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, key.state.topology);
+ if (input_assembly_topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
+ if (!spv_modules[1] && !spv_modules[2]) {
+ LOG_WARNING(Render_Vulkan, "Patch topology used without tessellation, using points");
+ input_assembly_topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+ }
+ }
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .topology = MaxwellToVK::PrimitiveTopology(device, state.topology),
- .primitiveRestartEnable = state.primitive_restart_enable != 0 &&
+ .topology = input_assembly_topology,
+ .primitiveRestartEnable = key.state.primitive_restart_enable != 0 &&
SupportsPrimitiveRestart(input_assembly_topology),
};
-
const VkPipelineTessellationStateCreateInfo tessellation_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .patchControlPoints = state.patch_control_points_minus_one.Value() + 1,
- };
-
- VkPipelineViewportStateCreateInfo viewport_ci{
- .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .viewportCount = Maxwell::NumViewports,
- .pViewports = nullptr,
- .scissorCount = Maxwell::NumViewports,
- .pScissors = nullptr,
+ .patchControlPoints = key.state.patch_control_points_minus_one.Value() + 1,
};
std::array<VkViewportSwizzleNV, Maxwell::NumViewports> swizzles;
- std::ranges::transform(viewport_swizzles, swizzles.begin(), UnpackViewportSwizzle);
- VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
+ std::ranges::transform(key.state.viewport_swizzles, swizzles.begin(), UnpackViewportSwizzle);
+ const VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
.pNext = nullptr,
.flags = 0,
.viewportCount = Maxwell::NumViewports,
.pViewportSwizzles = swizzles.data(),
};
- if (device.IsNvViewportSwizzleSupported()) {
- viewport_ci.pNext = &swizzle_ci;
- }
+ const VkPipelineViewportStateCreateInfo viewport_ci{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .pNext = device.IsNvViewportSwizzleSupported() ? &swizzle_ci : nullptr,
+ .flags = 0,
+ .viewportCount = Maxwell::NumViewports,
+ .pViewports = nullptr,
+ .scissorCount = Maxwell::NumViewports,
+ .pScissors = nullptr,
+ };
- const VkPipelineRasterizationStateCreateInfo rasterization_ci{
+ VkPipelineRasterizationStateCreateInfo rasterization_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.depthClampEnable =
- static_cast<VkBool32>(state.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE),
+ static_cast<VkBool32>(key.state.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE),
.rasterizerDiscardEnable =
- static_cast<VkBool32>(state.rasterize_enable == 0 ? VK_TRUE : VK_FALSE),
- .polygonMode = VK_POLYGON_MODE_FILL,
+ static_cast<VkBool32>(key.state.rasterize_enable == 0 ? VK_TRUE : VK_FALSE),
+ .polygonMode =
+ MaxwellToVK::PolygonMode(FixedPipelineState::UnpackPolygonMode(key.state.polygon_mode)),
.cullMode = static_cast<VkCullModeFlags>(
dynamic.cull_enable ? MaxwellToVK::CullFace(dynamic.CullFace()) : VK_CULL_MODE_NONE),
.frontFace = MaxwellToVK::FrontFace(dynamic.FrontFace()),
- .depthBiasEnable = state.depth_bias_enable,
+ .depthBiasEnable = key.state.depth_bias_enable,
.depthBiasConstantFactor = 0.0f,
.depthBiasClamp = 0.0f,
.depthBiasSlopeFactor = 0.0f,
.lineWidth = 1.0f,
};
+ VkPipelineRasterizationLineStateCreateInfoEXT line_state{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT,
+ .pNext = nullptr,
+ .lineRasterizationMode = key.state.smooth_lines != 0
+ ? VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT
+ : VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT,
+ .stippledLineEnable = VK_FALSE, // TODO
+ .lineStippleFactor = 0,
+ .lineStipplePattern = 0,
+ };
+ VkPipelineRasterizationConservativeStateCreateInfoEXT conservative_raster{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT,
+ .pNext = nullptr,
+ .flags = 0,
+ .conservativeRasterizationMode = key.state.conservative_raster_enable != 0
+ ? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
+ : VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
+ .extraPrimitiveOverestimationSize = 0.0f,
+ };
+ VkPipelineRasterizationProvokingVertexStateCreateInfoEXT provoking_vertex{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT,
+ .pNext = nullptr,
+ .provokingVertexMode = key.state.provoking_vertex_last != 0
+ ? VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT
+ : VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT,
+ };
+ if (IsLine(input_assembly_topology) && device.IsExtLineRasterizationSupported()) {
+ line_state.pNext = std::exchange(rasterization_ci.pNext, &line_state);
+ }
+ if (device.IsExtConservativeRasterizationSupported()) {
+ conservative_raster.pNext = std::exchange(rasterization_ci.pNext, &conservative_raster);
+ }
+ if (device.IsExtProvokingVertexSupported()) {
+ provoking_vertex.pNext = std::exchange(rasterization_ci.pNext, &provoking_vertex);
+ }
const VkPipelineMultisampleStateCreateInfo multisample_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
- .rasterizationSamples = ConvertMsaaMode(state.msaa_mode),
+ .rasterizationSamples = MaxwellToVK::MsaaMode(key.state.msaa_mode),
.sampleShadingEnable = VK_FALSE,
.minSampleShading = 0.0f,
.pSampleMask = nullptr,
.alphaToCoverageEnable = VK_FALSE,
.alphaToOneEnable = VK_FALSE,
};
-
const VkPipelineDepthStencilStateCreateInfo depth_stencil_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.pNext = nullptr,
@@ -355,32 +679,32 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
.depthCompareOp = dynamic.depth_test_enable
? MaxwellToVK::ComparisonOp(dynamic.DepthTestFunc())
: VK_COMPARE_OP_ALWAYS,
- .depthBoundsTestEnable = dynamic.depth_bounds_enable,
+ .depthBoundsTestEnable = dynamic.depth_bounds_enable && device.IsDepthBoundsSupported(),
.stencilTestEnable = dynamic.stencil_enable,
.front = GetStencilFaceState(dynamic.front),
.back = GetStencilFaceState(dynamic.back),
.minDepthBounds = 0.0f,
.maxDepthBounds = 0.0f,
};
-
- std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
- for (std::size_t index = 0; index < num_color_buffers; ++index) {
- static constexpr std::array COMPONENT_TABLE{
+ if (dynamic.depth_bounds_enable && !device.IsDepthBoundsSupported()) {
+ LOG_WARNING(Render_Vulkan, "Depth bounds is enabled but not supported");
+ }
+ static_vector<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
+ const size_t num_attachments{NumAttachments(key.state)};
+ for (size_t index = 0; index < num_attachments; ++index) {
+ static constexpr std::array mask_table{
VK_COLOR_COMPONENT_R_BIT,
VK_COLOR_COMPONENT_G_BIT,
VK_COLOR_COMPONENT_B_BIT,
VK_COLOR_COMPONENT_A_BIT,
};
- const auto& blend = state.attachments[index];
-
- VkColorComponentFlags color_components = 0;
- for (std::size_t i = 0; i < COMPONENT_TABLE.size(); ++i) {
- if (blend.Mask()[i]) {
- color_components |= COMPONENT_TABLE[i];
- }
+ const auto& blend{key.state.attachments[index]};
+ const std::array mask{blend.Mask()};
+ VkColorComponentFlags write_mask{};
+ for (size_t i = 0; i < mask_table.size(); ++i) {
+ write_mask |= mask[i] ? mask_table[i] : 0;
}
-
- cb_attachments[index] = {
+ cb_attachments.push_back({
.blendEnable = blend.enable != 0,
.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor()),
.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor()),
@@ -388,28 +712,27 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor()),
.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor()),
.alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha()),
- .colorWriteMask = color_components,
- };
+ .colorWriteMask = write_mask,
+ });
}
-
const VkPipelineColorBlendStateCreateInfo color_blend_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.logicOpEnable = VK_FALSE,
.logicOp = VK_LOGIC_OP_COPY,
- .attachmentCount = num_color_buffers,
+ .attachmentCount = static_cast<u32>(cb_attachments.size()),
.pAttachments = cb_attachments.data(),
.blendConstants = {},
};
-
- std::vector dynamic_states{
+ static_vector<VkDynamicState, 19> dynamic_states{
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ VK_DYNAMIC_STATE_LINE_WIDTH,
};
- if (device.IsExtExtendedDynamicStateSupported()) {
+ if (key.state.extended_dynamic_state) {
static constexpr std::array extended{
VK_DYNAMIC_STATE_CULL_MODE_EXT,
VK_DYNAMIC_STATE_FRONT_FACE_EXT,
@@ -421,9 +744,11 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_STENCIL_OP_EXT,
};
+ if (key.state.dynamic_vertex_input) {
+ dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT);
+ }
dynamic_states.insert(dynamic_states.end(), extended.begin(), extended.end());
}
-
const VkPipelineDynamicStateCreateInfo dynamic_state_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.pNext = nullptr,
@@ -431,34 +756,33 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
.dynamicStateCount = static_cast<u32>(dynamic_states.size()),
.pDynamicStates = dynamic_states.data(),
};
-
- const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+ [[maybe_unused]] const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
.pNext = nullptr,
.requiredSubgroupSize = GuestWarpSize,
};
-
- std::vector<VkPipelineShaderStageCreateInfo> shader_stages;
- std::size_t module_index = 0;
- for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
- if (!program[stage]) {
+ static_vector<VkPipelineShaderStageCreateInfo, 5> shader_stages;
+ for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
+ if (!spv_modules[stage]) {
continue;
}
-
- VkPipelineShaderStageCreateInfo& stage_ci = shader_stages.emplace_back();
- stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- stage_ci.pNext = nullptr;
- stage_ci.flags = 0;
- stage_ci.stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage));
- stage_ci.module = *modules[module_index++];
- stage_ci.pName = "main";
- stage_ci.pSpecializationInfo = nullptr;
-
+ [[maybe_unused]] auto& stage_ci =
+ shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .stage = MaxwellToVK::ShaderStage(Shader::StageFromIndex(stage)),
+ .module = *spv_modules[stage],
+ .pName = "main",
+ .pSpecializationInfo = nullptr,
+ });
+ /*
if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
stage_ci.pNext = &subgroup_size_ci;
}
+ */
}
- return device.GetLogical().CreateGraphicsPipeline(VkGraphicsPipelineCreateInfo{
+ pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@@ -473,12 +797,31 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
.pDepthStencilState = &depth_stencil_ci,
.pColorBlendState = &color_blend_ci,
.pDynamicState = &dynamic_state_ci,
- .layout = *layout,
- .renderPass = renderpass,
+ .layout = *pipeline_layout,
+ .renderPass = render_pass,
.subpass = 0,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
});
}
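
The three std::exchange lines in MakePipeline prepend each extension struct to rasterization_ci's pNext chain in a single statement. The idiom in isolation, with a simplified Node type:

    #include <utility>

    struct Node {
        const void* pNext{nullptr};
    };

    // After the call, the chain reads head -> extension -> previous head target.
    void PrependToChain(Node& head, Node& extension) {
        extension.pNext = std::exchange(head.pNext, &extension);
    }
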
+void GraphicsPipeline::Validate() {
+ size_t num_images{};
+ for (const auto& info : stage_infos) {
+ for (const auto& desc : info.texture_buffer_descriptors) {
+ num_images += desc.count;
+ }
+ for (const auto& desc : info.image_buffer_descriptors) {
+ num_images += desc.count;
+ }
+ for (const auto& desc : info.texture_descriptors) {
+ num_images += desc.count;
+ }
+ for (const auto& desc : info.image_descriptors) {
+ num_images += desc.count;
+ }
+ }
+ ASSERT(num_images <= MAX_IMAGE_ELEMENTS);
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 8b6a98fe0..2bd48d697 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -1,30 +1,36 @@
-// Copyright 2019 yuzu Emulator Project
+// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
+#include <algorithm>
#include <array>
-#include <optional>
-#include <vector>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+#include <type_traits>
-#include "common/common_types.h"
+#include "common/thread_worker.h"
+#include "shader_recompiler/shader_info.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
-namespace Vulkan {
+namespace VideoCore {
+class ShaderNotify;
+}
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+namespace Vulkan {
struct GraphicsPipelineCacheKey {
- VkRenderPass renderpass;
- std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
- FixedPipelineState fixed_state;
+ std::array<u64, 6> unique_hashes;
+ FixedPipelineState state;
- std::size_t Hash() const noexcept;
+ size_t Hash() const noexcept;
bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
@@ -32,72 +38,115 @@ struct GraphicsPipelineCacheKey {
return !operator==(rhs);
}
- std::size_t Size() const noexcept {
- return sizeof(renderpass) + sizeof(shaders) + fixed_state.Size();
+ size_t Size() const noexcept {
+ return sizeof(unique_hashes) + state.Size();
}
};
static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
+} // namespace Vulkan
+
+namespace std {
+template <>
+struct hash<Vulkan::GraphicsPipelineCacheKey> {
+ size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
+ return k.Hash();
+ }
+};
+} // namespace std
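
With this std::hash specialization, the key works directly in unordered containers; a sketch of the lookup shape used by the cache (the container type is inferred from the try_emplace calls later in this diff):

    #include <memory>
    #include <unordered_map>

    std::unordered_map<Vulkan::GraphicsPipelineCacheKey,
                       std::unique_ptr<Vulkan::GraphicsPipeline>>
        graphics_cache;

    Vulkan::GraphicsPipeline* Lookup(const Vulkan::GraphicsPipelineCacheKey& key) {
        const auto it = graphics_cache.find(key); // uses Hash() and operator==
        return it != graphics_cache.end() ? it->second.get() : nullptr;
    }
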
+
+namespace Vulkan {
+
class Device;
-class VKDescriptorPool;
+class RenderPassCache;
class VKScheduler;
class VKUpdateDescriptorQueue;
-using SPIRVProgram = std::array<std::optional<SPIRVShader>, Maxwell::MaxShaderStage>;
+class GraphicsPipeline {
+ static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
-class VKGraphicsPipeline final {
public:
- explicit VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
- VKDescriptorPool& descriptor_pool,
- VKUpdateDescriptorQueue& update_descriptor_queue_,
- const GraphicsPipelineCacheKey& key,
- vk::Span<VkDescriptorSetLayoutBinding> bindings,
- const SPIRVProgram& program, u32 num_color_buffers);
- ~VKGraphicsPipeline();
-
- VkDescriptorSet CommitDescriptorSet();
-
- VkPipeline GetHandle() const {
- return *pipeline;
+ explicit GraphicsPipeline(
+ Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
+ VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
+ VideoCore::ShaderNotify* shader_notify, const Device& device,
+ DescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue,
+ Common::ThreadWorker* worker_thread, RenderPassCache& render_pass_cache,
+ const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
+ const std::array<const Shader::Info*, NUM_STAGES>& infos);
+
+ GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
+ GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
+
+ GraphicsPipeline& operator=(const GraphicsPipeline&) = delete;
+ GraphicsPipeline(const GraphicsPipeline&) = delete;
+
+ void AddTransition(GraphicsPipeline* transition);
+
+ void Configure(bool is_indexed) {
+ configure_func(this, is_indexed);
}
- VkPipelineLayout GetLayout() const {
- return *layout;
+ [[nodiscard]] GraphicsPipeline* Next(const GraphicsPipelineCacheKey& current_key) noexcept {
+ if (key == current_key) {
+ return this;
+ }
+ const auto it{std::find(transition_keys.begin(), transition_keys.end(), current_key)};
+ return it != transition_keys.end() ? transitions[std::distance(transition_keys.begin(), it)]
+ : nullptr;
}
- GraphicsPipelineCacheKey GetCacheKey() const {
- return cache_key;
+ [[nodiscard]] bool IsBuilt() const noexcept {
+ return is_built.load(std::memory_order::relaxed);
}
-private:
- vk::DescriptorSetLayout CreateDescriptorSetLayout(
- vk::Span<VkDescriptorSetLayoutBinding> bindings) const;
+ template <typename Spec>
+ static auto MakeConfigureSpecFunc() {
+ return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
+ }
- vk::PipelineLayout CreatePipelineLayout() const;
+private:
+ template <typename Spec>
+ void ConfigureImpl(bool is_indexed);
- vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
- const SPIRVProgram& program) const;
+ void ConfigureDraw();
- std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
+ void MakePipeline(VkRenderPass render_pass);
- vk::Pipeline CreatePipeline(const SPIRVProgram& program, VkRenderPass renderpass,
- u32 num_color_buffers) const;
+ void Validate();
+ const GraphicsPipelineCacheKey key;
+ Tegra::Engines::Maxwell3D& maxwell3d;
+ Tegra::MemoryManager& gpu_memory;
const Device& device;
+ TextureCache& texture_cache;
+ BufferCache& buffer_cache;
VKScheduler& scheduler;
- const GraphicsPipelineCacheKey cache_key;
- const u64 hash;
+ VKUpdateDescriptorQueue& update_descriptor_queue;
+
+ void (*configure_func)(GraphicsPipeline*, bool){};
+
+ std::vector<GraphicsPipelineCacheKey> transition_keys;
+ std::vector<GraphicsPipeline*> transitions;
+
+ std::array<vk::ShaderModule, NUM_STAGES> spv_modules;
+
+ std::array<Shader::Info, NUM_STAGES> stage_infos;
+ std::array<u32, 5> enabled_uniform_buffer_masks{};
+ VideoCommon::UniformBufferSizes uniform_buffer_sizes{};
vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
- VKUpdateDescriptorQueue& update_descriptor_queue;
- vk::PipelineLayout layout;
- vk::DescriptorUpdateTemplateKHR descriptor_template;
- std::vector<vk::ShaderModule> modules;
-
+ vk::PipelineLayout pipeline_layout;
+ vk::DescriptorUpdateTemplateKHR descriptor_update_template;
vk::Pipeline pipeline;
+
+ std::condition_variable build_condvar;
+ std::mutex build_mutex;
+ std::atomic_bool is_built{false};
+ bool uses_push_descriptor{false};
};
} // namespace Vulkan
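
Next() above searches transition_keys and indexes transitions at the matching position, so the two vectors must stay in lockstep. A plausible AddTransition body under that assumption (its definition is outside this diff):

    void GraphicsPipeline::AddTransition(GraphicsPipeline* transition) {
        // Keep the key and pointer vectors parallel so Next() can index one by the other.
        transition_keys.push_back(transition->key);
        transitions.push_back(transition);
    }
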
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.h b/src/video_core/renderer_vulkan/vk_master_semaphore.h
index ee3cd35d0..4f8688118 100644
--- a/src/video_core/renderer_vulkan/vk_master_semaphore.h
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.h
@@ -39,9 +39,9 @@ public:
return KnownGpuTick() >= tick;
}
- /// Advance to the logical tick.
- void NextTick() noexcept {
- ++current_tick;
+ /// Advance to the logical tick and return the old one
+ [[nodiscard]] u64 NextTick() noexcept {
+ return current_tick.fetch_add(1, std::memory_order::relaxed);
}
/// Refresh the known GPU tick
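
The new NextTick allocates ticks atomically and returns the pre-increment value, so concurrent callers each observe a distinct tick. In isolation:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint64_t> current_tick{41};
    const std::uint64_t old_tick = current_tick.fetch_add(1, std::memory_order::relaxed);
    // old_tick == 41; current_tick is now 42.
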
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 8991505ca..57b163247 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -4,444 +4,613 @@
#include <algorithm>
#include <cstddef>
+#include <fstream>
#include <memory>
+#include <thread>
#include <vector>
#include "common/bit_cast.h"
#include "common/cityhash.h"
+#include "common/fs/fs.h"
+#include "common/fs/path_util.h"
#include "common/microprofile.h"
+#include "common/thread_worker.h"
#include "core/core.h"
#include "core/memory.h"
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/environment.h"
+#include "shader_recompiler/frontend/maxwell/control_flow.h"
+#include "shader_recompiler/frontend/maxwell/translate_program.h"
+#include "shader_recompiler/program_header.h"
+#include "video_core/dirty_flags.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
-#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
-#include "video_core/shader/compiler_settings.h"
-#include "video_core/shader/memory_util.h"
#include "video_core/shader_cache.h"
+#include "video_core/shader_environment.h"
#include "video_core/shader_notify.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
-
MICROPROFILE_DECLARE(Vulkan_PipelineCache);
-using Tegra::Engines::ShaderType;
-using VideoCommon::Shader::GetShaderAddress;
-using VideoCommon::Shader::GetShaderCode;
-using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
-using VideoCommon::Shader::ProgramCode;
-using VideoCommon::Shader::STAGE_MAIN_OFFSET;
-
namespace {
+using Shader::Backend::SPIRV::EmitSPIRV;
+using Shader::Maxwell::MergeDualVertexPrograms;
+using Shader::Maxwell::TranslateProgram;
+using VideoCommon::ComputeEnvironment;
+using VideoCommon::FileEnvironment;
+using VideoCommon::GenericEnvironment;
+using VideoCommon::GraphicsEnvironment;
+
+constexpr u32 CACHE_VERSION = 5;
+
+template <typename Container>
+auto MakeSpan(Container& container) {
+ return std::span(container.data(), container.size());
+}
-constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
-constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
-constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
-constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
-
-constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
- .depth = VideoCommon::Shader::CompileDepth::FullDecompile,
- .disable_else_derivation = true,
-};
-
-constexpr std::size_t GetStageFromProgram(std::size_t program) {
- return program == 0 ? 0 : program - 1;
+Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp comparison) {
+ switch (comparison) {
+ case Maxwell::ComparisonOp::Never:
+ case Maxwell::ComparisonOp::NeverOld:
+ return Shader::CompareFunction::Never;
+ case Maxwell::ComparisonOp::Less:
+ case Maxwell::ComparisonOp::LessOld:
+ return Shader::CompareFunction::Less;
+ case Maxwell::ComparisonOp::Equal:
+ case Maxwell::ComparisonOp::EqualOld:
+ return Shader::CompareFunction::Equal;
+ case Maxwell::ComparisonOp::LessEqual:
+ case Maxwell::ComparisonOp::LessEqualOld:
+ return Shader::CompareFunction::LessThanEqual;
+ case Maxwell::ComparisonOp::Greater:
+ case Maxwell::ComparisonOp::GreaterOld:
+ return Shader::CompareFunction::Greater;
+ case Maxwell::ComparisonOp::NotEqual:
+ case Maxwell::ComparisonOp::NotEqualOld:
+ return Shader::CompareFunction::NotEqual;
+ case Maxwell::ComparisonOp::GreaterEqual:
+ case Maxwell::ComparisonOp::GreaterEqualOld:
+ return Shader::CompareFunction::GreaterThanEqual;
+ case Maxwell::ComparisonOp::Always:
+ case Maxwell::ComparisonOp::AlwaysOld:
+ return Shader::CompareFunction::Always;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
+ return {};
}
-constexpr ShaderType GetStageFromProgram(Maxwell::ShaderProgram program) {
- return static_cast<ShaderType>(GetStageFromProgram(static_cast<std::size_t>(program)));
+Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
+ if (attr.enabled == 0) {
+ return Shader::AttributeType::Disabled;
+ }
+ switch (attr.Type()) {
+ case Maxwell::VertexAttribute::Type::SignedNorm:
+ case Maxwell::VertexAttribute::Type::UnsignedNorm:
+ case Maxwell::VertexAttribute::Type::UnsignedScaled:
+ case Maxwell::VertexAttribute::Type::SignedScaled:
+ case Maxwell::VertexAttribute::Type::Float:
+ return Shader::AttributeType::Float;
+ case Maxwell::VertexAttribute::Type::SignedInt:
+ return Shader::AttributeType::SignedInt;
+ case Maxwell::VertexAttribute::Type::UnsignedInt:
+ return Shader::AttributeType::UnsignedInt;
+ }
+ return Shader::AttributeType::Float;
}
-ShaderType GetShaderType(Maxwell::ShaderProgram program) {
- switch (program) {
- case Maxwell::ShaderProgram::VertexB:
- return ShaderType::Vertex;
- case Maxwell::ShaderProgram::TesselationControl:
- return ShaderType::TesselationControl;
- case Maxwell::ShaderProgram::TesselationEval:
- return ShaderType::TesselationEval;
- case Maxwell::ShaderProgram::Geometry:
- return ShaderType::Geometry;
- case Maxwell::ShaderProgram::Fragment:
- return ShaderType::Fragment;
- default:
- UNIMPLEMENTED_MSG("program={}", program);
- return ShaderType::Vertex;
+Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t index) {
+ switch (state.DynamicAttributeType(index)) {
+ case 0:
+ return Shader::AttributeType::Disabled;
+ case 1:
+ return Shader::AttributeType::Float;
+ case 2:
+ return Shader::AttributeType::SignedInt;
+ case 3:
+ return Shader::AttributeType::UnsignedInt;
}
+ return Shader::AttributeType::Disabled;
}
-template <VkDescriptorType descriptor_type, class Container>
-void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
- VkShaderStageFlags stage_flags, const Container& container) {
- const u32 num_entries = static_cast<u32>(std::size(container));
- for (std::size_t i = 0; i < num_entries; ++i) {
- u32 count = 1;
- if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
- // Combined image samplers can be arrayed.
- count = container[i].size;
+Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
+ const GraphicsPipelineCacheKey& key,
+ const Shader::IR::Program& program,
+ const Shader::IR::Program* previous_program) {
+ Shader::RuntimeInfo info;
+ if (previous_program) {
+ info.previous_stage_stores = previous_program->info.stores;
+ if (previous_program->is_geometry_passthrough) {
+ info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
}
- bindings.push_back({
- .binding = binding++,
- .descriptorType = descriptor_type,
- .descriptorCount = count,
- .stageFlags = stage_flags,
- .pImmutableSamplers = nullptr,
- });
+ } else {
+ info.previous_stage_stores.mask.set();
+ }
+ const Shader::Stage stage{program.stage};
+ const bool has_geometry{key.unique_hashes[4] != 0 && !programs[4].is_geometry_passthrough};
+ const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
+ const float point_size{Common::BitCast<float>(key.state.point_size)};
+ switch (stage) {
+ case Shader::Stage::VertexB:
+ if (!has_geometry) {
+ if (key.state.topology == Maxwell::PrimitiveTopology::Points) {
+ info.fixed_state_point_size = point_size;
+ }
+ if (key.state.xfb_enabled) {
+ info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
+ }
+ info.convert_depth_mode = gl_ndc;
+ }
+ if (key.state.dynamic_vertex_input) {
+ for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
+ info.generic_input_types[index] = AttributeType(key.state, index);
+ }
+ } else {
+ std::ranges::transform(key.state.attributes, info.generic_input_types.begin(),
+ &CastAttributeType);
+ }
+ break;
+ case Shader::Stage::TessellationEval:
+ // We have to flip tessellation clockwise for some reason...
+ info.tess_clockwise = key.state.tessellation_clockwise == 0;
+ info.tess_primitive = [&key] {
+ const u32 raw{key.state.tessellation_primitive.Value()};
+ switch (static_cast<Maxwell::TessellationPrimitive>(raw)) {
+ case Maxwell::TessellationPrimitive::Isolines:
+ return Shader::TessPrimitive::Isolines;
+ case Maxwell::TessellationPrimitive::Triangles:
+ return Shader::TessPrimitive::Triangles;
+ case Maxwell::TessellationPrimitive::Quads:
+ return Shader::TessPrimitive::Quads;
+ }
+ UNREACHABLE();
+ return Shader::TessPrimitive::Triangles;
+ }();
+ info.tess_spacing = [&] {
+ const u32 raw{key.state.tessellation_spacing};
+ switch (static_cast<Maxwell::TessellationSpacing>(raw)) {
+ case Maxwell::TessellationSpacing::Equal:
+ return Shader::TessSpacing::Equal;
+ case Maxwell::TessellationSpacing::FractionalOdd:
+ return Shader::TessSpacing::FractionalOdd;
+ case Maxwell::TessellationSpacing::FractionalEven:
+ return Shader::TessSpacing::FractionalEven;
+ }
+ UNREACHABLE();
+ return Shader::TessSpacing::Equal;
+ }();
+ break;
+ case Shader::Stage::Geometry:
+ if (program.output_topology == Shader::OutputTopology::PointList) {
+ info.fixed_state_point_size = point_size;
+ }
+ if (key.state.xfb_enabled != 0) {
+ info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
+ }
+ info.convert_depth_mode = gl_ndc;
+ break;
+ case Shader::Stage::Fragment:
+ info.alpha_test_func = MaxwellToCompareFunction(
+ key.state.UnpackComparisonOp(key.state.alpha_test_func.Value()));
+ info.alpha_test_reference = Common::BitCast<float>(key.state.alpha_test_ref);
+ break;
+ default:
+ break;
+ }
+ switch (key.state.topology) {
+ case Maxwell::PrimitiveTopology::Points:
+ info.input_topology = Shader::InputTopology::Points;
+ break;
+ case Maxwell::PrimitiveTopology::Lines:
+ case Maxwell::PrimitiveTopology::LineLoop:
+ case Maxwell::PrimitiveTopology::LineStrip:
+ info.input_topology = Shader::InputTopology::Lines;
+ break;
+ case Maxwell::PrimitiveTopology::Triangles:
+ case Maxwell::PrimitiveTopology::TriangleStrip:
+ case Maxwell::PrimitiveTopology::TriangleFan:
+ case Maxwell::PrimitiveTopology::Quads:
+ case Maxwell::PrimitiveTopology::QuadStrip:
+ case Maxwell::PrimitiveTopology::Polygon:
+ case Maxwell::PrimitiveTopology::Patches:
+ info.input_topology = Shader::InputTopology::Triangles;
+ break;
+ case Maxwell::PrimitiveTopology::LinesAdjacency:
+ case Maxwell::PrimitiveTopology::LineStripAdjacency:
+ info.input_topology = Shader::InputTopology::LinesAdjacency;
+ break;
+ case Maxwell::PrimitiveTopology::TrianglesAdjacency:
+ case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
+ info.input_topology = Shader::InputTopology::TrianglesAdjacency;
+ break;
}
+ info.force_early_z = key.state.early_z != 0;
+ info.y_negate = key.state.y_negate != 0;
+ return info;
}
+} // Anonymous namespace
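
The tess_primitive and tess_spacing initializers in MakeRuntimeInfo use immediately invoked lambdas so a switch can yield a single const value without a mutable temporary. The shape of the idiom, with a stand-in enum:

    #include <cstdint>

    enum class Spacing { Equal, FractionalOdd, FractionalEven };

    const Spacing spacing = [](std::uint32_t raw) {
        switch (raw) {
        case 0:
            return Spacing::Equal;
        case 1:
            return Spacing::FractionalOdd;
        default:
            return Spacing::FractionalEven;
        }
    }(1); // spacing == Spacing::FractionalOdd
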
-u32 FillDescriptorLayout(const ShaderEntries& entries,
- std::vector<VkDescriptorSetLayoutBinding>& bindings,
- Maxwell::ShaderProgram program_type, u32 base_binding) {
- const ShaderType stage = GetStageFromProgram(program_type);
- const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
-
- u32 binding = base_binding;
- AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
- AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
- AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
- AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
- AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
- AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
- return binding;
+size_t ComputePipelineCacheKey::Hash() const noexcept {
+ const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
+ return static_cast<size_t>(hash);
}
-} // Anonymous namespace
+bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
+ return std::memcmp(&rhs, this, sizeof *this) == 0;
+}
-std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
+size_t GraphicsPipelineCacheKey::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
- return static_cast<std::size_t>(hash);
+ return static_cast<size_t>(hash);
}
bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
return std::memcmp(&rhs, this, Size()) == 0;
}
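
Hash() and operator== treat the key as raw bytes, which is only sound because the key type has unique object representations, as asserted in the header above. For contrast, a padded type would generally not qualify:

    #include <type_traits>

    struct PossiblyPadded {
        char c; // padding bytes typically follow on common ABIs
        int i;
    };
    // Padding is not part of the value, so byte-wise memcmp/CityHash64 could
    // disagree with member-wise equality; such a type usually fails this check:
    // static_assert(std::has_unique_object_representations_v<PossiblyPadded>);
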
-std::size_t ComputePipelineCacheKey::Hash() const noexcept {
- const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
- return static_cast<std::size_t>(hash);
-}
-
-bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
- return std::memcmp(&rhs, this, sizeof *this) == 0;
+PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
+ Tegra::Engines::KeplerCompute& kepler_compute_,
+ Tegra::MemoryManager& gpu_memory_, const Device& device_,
+ VKScheduler& scheduler_, DescriptorPool& descriptor_pool_,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
+ TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
+ : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
+ device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
+ update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
+ buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
+ use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
+ workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
+ serialization_thread(1, "yuzu:PipelineSerialization") {
+ const auto& float_control{device.FloatControlProperties()};
+ const VkDriverIdKHR driver_id{device.GetDriverID()};
+ profile = Shader::Profile{
+ .supported_spirv = device.IsKhrSpirv1_4Supported() ? 0x00010400U : 0x00010000U,
+ .unified_descriptor_binding = true,
+ .support_descriptor_aliasing = true,
+ .support_int8 = true,
+ .support_int16 = device.IsShaderInt16Supported(),
+ .support_int64 = device.IsShaderInt64Supported(),
+ .support_vertex_instance_id = false,
+ .support_float_controls = true,
+ .support_separate_denorm_behavior = float_control.denormBehaviorIndependence ==
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
+ .support_separate_rounding_mode =
+ float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
+ .support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
+ .support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
+ .support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
+ .support_fp32_denorm_flush = float_control.shaderDenormFlushToZeroFloat32 != VK_FALSE,
+ .support_fp16_signed_zero_nan_preserve =
+ float_control.shaderSignedZeroInfNanPreserveFloat16 != VK_FALSE,
+ .support_fp32_signed_zero_nan_preserve =
+ float_control.shaderSignedZeroInfNanPreserveFloat32 != VK_FALSE,
+ .support_fp64_signed_zero_nan_preserve =
+ float_control.shaderSignedZeroInfNanPreserveFloat64 != VK_FALSE,
+ .support_explicit_workgroup_layout = device.IsKhrWorkgroupMemoryExplicitLayoutSupported(),
+ .support_vote = true,
+ .support_viewport_index_layer_non_geometry =
+ device.IsExtShaderViewportIndexLayerSupported(),
+ .support_viewport_mask = device.IsNvViewportArray2Supported(),
+ .support_typeless_image_loads = device.IsFormatlessImageLoadSupported(),
+ .support_demote_to_helper_invocation = true,
+ .support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
+ .support_derivative_control = true,
+ .support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
+
+ .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
+
+ .lower_left_origin_mode = false,
+ .need_declared_frag_colors = false,
+
+ .has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR,
+ .has_broken_unsigned_image_offsets = false,
+ .has_broken_signed_operations = false,
+ .has_broken_fp16_float_controls = driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR,
+ .ignore_nan_fp_comparisons = false,
+ };
+ host_info = Shader::HostTranslateInfo{
+ .support_float16 = device.IsFloat16Supported(),
+ .support_int64 = device.IsShaderInt64Supported(),
+ };
}
-Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine_, ShaderType stage_,
- GPUVAddr gpu_addr_, VAddr cpu_addr_, ProgramCode program_code_, u32 main_offset_)
- : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage_, engine_),
- shader_ir(program_code, main_offset_, compiler_settings, registry),
- entries(GenerateShaderEntries(shader_ir)) {}
-
-Shader::~Shader() = default;
-
-VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
- Tegra::Engines::Maxwell3D& maxwell3d_,
- Tegra::Engines::KeplerCompute& kepler_compute_,
- Tegra::MemoryManager& gpu_memory_, const Device& device_,
- VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
- VKUpdateDescriptorQueue& update_descriptor_queue_)
- : VideoCommon::ShaderCache<Shader>{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_},
- kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
- scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, update_descriptor_queue{
- update_descriptor_queue_} {}
-
-VKPipelineCache::~VKPipelineCache() = default;
+PipelineCache::~PipelineCache() = default;
-std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
- std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
-
- for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
- const auto program{static_cast<Maxwell::ShaderProgram>(index)};
-
- // Skip stages that are not enabled
- if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
- continue;
- }
-
- const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)};
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
- ASSERT(cpu_addr);
-
- Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
- if (!result) {
- const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)};
-
- // No shader found - create a new one
- static constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
- const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
- ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false);
- const std::size_t size_in_bytes = code.size() * sizeof(u64);
-
- auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr,
- std::move(code), stage_offset);
- result = shader.get();
+GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
+ MICROPROFILE_SCOPE(Vulkan_PipelineCache);
- if (cpu_addr) {
- Register(std::move(shader), *cpu_addr, size_in_bytes);
- } else {
- null_shader = std::move(shader);
- }
+ if (!RefreshStages(graphics_key.unique_hashes)) {
+ current_pipeline = nullptr;
+ return nullptr;
+ }
+ graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
+ device.IsExtVertexInputDynamicStateSupported());
+
+ if (current_pipeline) {
+ GraphicsPipeline* const next{current_pipeline->Next(graphics_key)};
+ if (next) {
+ current_pipeline = next;
+ return BuiltPipeline(current_pipeline);
}
- shaders[index] = result;
}
- return last_shaders = shaders;
+ return CurrentGraphicsPipelineSlowPath();
}
-VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
- const GraphicsPipelineCacheKey& key, u32 num_color_buffers,
- VideoCommon::Shader::AsyncShaders& async_shaders) {
+ComputePipeline* PipelineCache::CurrentComputePipeline() {
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
- if (last_graphics_pipeline && last_graphics_key == key) {
- return last_graphics_pipeline;
- }
- last_graphics_key = key;
-
- if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) {
- std::unique_lock lock{pipeline_cache};
- const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
- if (is_cache_miss) {
- gpu.ShaderNotify().MarkSharderBuilding();
- LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
- const auto [program, bindings] = DecompileShaders(key.fixed_state);
- async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool,
- update_descriptor_queue, bindings, program, key,
- num_color_buffers);
- }
- last_graphics_pipeline = pair->second.get();
- return last_graphics_pipeline;
+ const ShaderInfo* const shader{ComputeShader()};
+ if (!shader) {
+ return nullptr;
}
-
- const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
- auto& entry = pair->second;
- if (is_cache_miss) {
- gpu.ShaderNotify().MarkSharderBuilding();
- LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
- const auto [program, bindings] = DecompileShaders(key.fixed_state);
- entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool,
- update_descriptor_queue, key, bindings,
- program, num_color_buffers);
- gpu.ShaderNotify().MarkShaderComplete();
+ const auto& qmd{kepler_compute.launch_description};
+ const ComputePipelineCacheKey key{
+ .unique_hash = shader->unique_hash,
+ .shared_memory_size = qmd.shared_alloc,
+ .workgroup_size{qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z},
+ };
+ const auto [pair, is_new]{compute_cache.try_emplace(key)};
+ auto& pipeline{pair->second};
+ if (!is_new) {
+ return pipeline.get();
}
- last_graphics_pipeline = entry.get();
- return last_graphics_pipeline;
+ pipeline = CreateComputePipeline(key, shader);
+ return pipeline.get();
}
-VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCacheKey& key) {
- MICROPROFILE_SCOPE(Vulkan_PipelineCache);
-
- const auto [pair, is_cache_miss] = compute_cache.try_emplace(key);
- auto& entry = pair->second;
- if (!is_cache_miss) {
- return *entry;
+void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
+ const VideoCore::DiskResourceLoadCallback& callback) {
+ if (title_id == 0) {
+ return;
}
- LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
-
- const GPUVAddr gpu_addr = key.shader;
-
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
- ASSERT(cpu_addr);
+ const auto shader_dir{Common::FS::GetYuzuPath(Common::FS::YuzuPath::ShaderDir)};
+ const auto base_dir{shader_dir / fmt::format("{:016x}", title_id)};
+ if (!Common::FS::CreateDir(shader_dir) || !Common::FS::CreateDir(base_dir)) {
+ LOG_ERROR(Common_Filesystem, "Failed to create pipeline cache directories");
+ return;
+ }
+ pipeline_cache_filename = base_dir / "vulkan.bin";
+
+ struct {
+ std::mutex mutex;
+ size_t total{};
+ size_t built{};
+ bool has_loaded{};
+ } state;
+
+ const auto load_compute{[&](std::ifstream& file, FileEnvironment env) {
+ ComputePipelineCacheKey key;
+ file.read(reinterpret_cast<char*>(&key), sizeof(key));
+
+ workers.QueueWork([this, key, env = std::move(env), &state, &callback]() mutable {
+ ShaderPools pools;
+ auto pipeline{CreateComputePipeline(pools, key, env, false)};
+ std::lock_guard lock{state.mutex};
+ if (pipeline) {
+ compute_cache.emplace(key, std::move(pipeline));
+ }
+ ++state.built;
+ if (state.has_loaded) {
+ callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
+ }
+ });
+ ++state.total;
+ }};
+ const bool extended_dynamic_state = device.IsExtExtendedDynamicStateSupported();
+ const bool dynamic_vertex_input = device.IsExtVertexInputDynamicStateSupported();
+ const auto load_graphics{[&](std::ifstream& file, std::vector<FileEnvironment> envs) {
+ GraphicsPipelineCacheKey key;
+ file.read(reinterpret_cast<char*>(&key), sizeof(key));
+
+ if ((key.state.extended_dynamic_state != 0) != extended_dynamic_state ||
+ (key.state.dynamic_vertex_input != 0) != dynamic_vertex_input) {
+ return;
+ }
+ workers.QueueWork([this, key, envs = std::move(envs), &state, &callback]() mutable {
+ ShaderPools pools;
+ boost::container::static_vector<Shader::Environment*, 5> env_ptrs;
+ for (auto& env : envs) {
+ env_ptrs.push_back(&env);
+ }
+ auto pipeline{CreateGraphicsPipeline(pools, key, MakeSpan(env_ptrs), false)};
- Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
- if (!shader) {
- // No shader found - create a new one
- const auto host_ptr = gpu_memory.GetPointer(gpu_addr);
+ std::lock_guard lock{state.mutex};
+ graphics_cache.emplace(key, std::move(pipeline));
+ ++state.built;
+ if (state.has_loaded) {
+ callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
+ }
+ });
+ ++state.total;
+ }};
+ VideoCommon::LoadPipelines(stop_loading, pipeline_cache_filename, CACHE_VERSION, load_compute,
+ load_graphics);
- ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true);
- const std::size_t size_in_bytes = code.size() * sizeof(u64);
+ std::unique_lock lock{state.mutex};
+ callback(VideoCore::LoadCallbackStage::Build, 0, state.total);
+ state.has_loaded = true;
+ lock.unlock();
- auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr,
- *cpu_addr, std::move(code), KERNEL_MAIN_OFFSET);
- shader = shader_info.get();
+ workers.WaitForRequests();
+}
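
LoadDiskResources funnels builds through the worker pool while guarding the shared progress counters with a mutex, and only reports progress once has_loaded is set. The core shape of that pattern, with Worker as a template stand-in for Common::ThreadWorker (only QueueWork is assumed, as used above):

    #include <cstddef>
    #include <functional>
    #include <mutex>

    struct Progress {
        std::mutex mutex;
        std::size_t total{};
        std::size_t built{};
        bool has_loaded{};
    };

    template <typename Worker>
    void QueueBuild(Worker& workers, Progress& state, std::function<void()> build) {
        workers.QueueWork([&state, build = std::move(build)] {
            build();
            std::lock_guard lock{state.mutex};
            ++state.built; // progress is only reported after has_loaded is set
        });
        ++state.total; // bumped on the enqueueing thread before has_loaded flips
    }
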
- if (cpu_addr) {
- Register(std::move(shader_info), *cpu_addr, size_in_bytes);
- } else {
- null_kernel = std::move(shader_info);
- }
+GraphicsPipeline* PipelineCache::CurrentGraphicsPipelineSlowPath() {
+ const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
+ auto& pipeline{pair->second};
+ if (is_new) {
+ pipeline = CreateGraphicsPipeline();
}
-
- const Specialization specialization{
- .base_binding = 0,
- .workgroup_size = key.workgroup_size,
- .shared_memory_size = key.shared_memory_size,
- .point_size = std::nullopt,
- .enabled_attributes = {},
- .attribute_types = {},
- .ndc_minus_one_to_one = false,
- };
- const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
- shader->GetRegistry(), specialization),
- shader->GetEntries()};
- entry = std::make_unique<VKComputePipeline>(device, scheduler, descriptor_pool,
- update_descriptor_queue, spirv_shader);
- return *entry;
+ if (!pipeline) {
+ return nullptr;
+ }
+ if (current_pipeline) {
+ current_pipeline->AddTransition(pipeline.get());
+ }
+ current_pipeline = pipeline.get();
+ return BuiltPipeline(current_pipeline);
}
-void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) {
- gpu.ShaderNotify().MarkShaderComplete();
- std::unique_lock lock{pipeline_cache};
- graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline);
+GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const noexcept {
+ if (pipeline->IsBuilt()) {
+ return pipeline;
+ }
+ if (!use_asynchronous_shaders) {
+ return pipeline;
+ }
+ // If depth is in use, we can assume the game is not rendering something that will
+ // only be drawn once, so skipping the draw while the pipeline builds is safe.
+ if (maxwell3d.regs.zeta_enable) {
+ return nullptr;
+ }
+ // If the game is using a small index count, we can assume these are full-screen quads.
+ // These shaders are usually used only once to build textures, so we assume they
+ // cannot be built asynchronously.
+ if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+ return pipeline;
+ }
+ return nullptr;
}
-void VKPipelineCache::OnShaderRemoval(Shader* shader) {
- bool finished = false;
- const auto Finish = [&] {
- // TODO(Rodrigo): Instead of finishing here, wait for the fences that use this pipeline and
- // flush.
- if (finished) {
- return;
- }
- finished = true;
- scheduler.Finish();
- };
-
- const GPUVAddr invalidated_addr = shader->GetGpuAddr();
- for (auto it = graphics_cache.begin(); it != graphics_cache.end();) {
- auto& entry = it->first;
- if (std::find(entry.shaders.begin(), entry.shaders.end(), invalidated_addr) ==
- entry.shaders.end()) {
- ++it;
+std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
+ ShaderPools& pools, const GraphicsPipelineCacheKey& key,
+ std::span<Shader::Environment* const> envs, bool build_in_parallel) try {
+ LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());
+ size_t env_index{0};
+ std::array<Shader::IR::Program, Maxwell::MaxShaderProgram> programs;
+ const bool uses_vertex_a{key.unique_hashes[0] != 0};
+ const bool uses_vertex_b{key.unique_hashes[1] != 0};
+ for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+ if (key.unique_hashes[index] == 0) {
continue;
}
- Finish();
- it = graphics_cache.erase(it);
+ Shader::Environment& env{*envs[env_index]};
+ ++env_index;
+
+ const u32 cfg_offset{static_cast<u32>(env.StartAddress() + sizeof(Shader::ProgramHeader))};
+ Shader::Maxwell::Flow::CFG cfg(env, pools.flow_block, cfg_offset, index == 0);
+ if (!uses_vertex_a || index != 1) {
+ // Normal path
+ programs[index] = TranslateProgram(pools.inst, pools.block, env, cfg, host_info);
+ } else {
+ // VertexB path when VertexA is present.
+ auto& program_va{programs[0]};
+ auto program_vb{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
+ programs[index] = MergeDualVertexPrograms(program_va, program_vb, env);
+ }
}
- for (auto it = compute_cache.begin(); it != compute_cache.end();) {
- auto& entry = it->first;
- if (entry.shader != invalidated_addr) {
- ++it;
+ std::array<const Shader::Info*, Maxwell::MaxShaderStage> infos{};
+ std::array<vk::ShaderModule, Maxwell::MaxShaderStage> modules;
+
+ const Shader::IR::Program* previous_stage{};
+ Shader::Backend::Bindings binding;
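+    // When VertexA and VertexB are both present they were merged above, so emission starts at
+    // the merged VertexB program.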
+ for (size_t index = uses_vertex_a && uses_vertex_b ? 1 : 0; index < Maxwell::MaxShaderProgram;
+ ++index) {
+ if (key.unique_hashes[index] == 0) {
continue;
}
- Finish();
- it = compute_cache.erase(it);
+ UNIMPLEMENTED_IF(index == 0);
+
+ Shader::IR::Program& program{programs[index]};
+ const size_t stage_index{index - 1};
+ infos[stage_index] = &program.info;
+
+ const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
+ const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
+ device.SaveShader(code);
+ modules[stage_index] = BuildShader(device, code);
+ if (device.HasDebuggingToolAttached()) {
+ const std::string name{fmt::format("Shader {:016x}", key.unique_hashes[index])};
+ modules[stage_index].SetObjectNameEXT(name.c_str());
+ }
+ previous_stage = &program;
}
+ Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
+ return std::make_unique<GraphicsPipeline>(
+ maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
+ descriptor_pool, update_descriptor_queue, thread_worker, render_pass_cache, key,
+ std::move(modules), infos);
+
+} catch (const Shader::Exception& exception) {
+ LOG_ERROR(Render_Vulkan, "{}", exception.what());
+ return nullptr;
}
-std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
-VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
- Specialization specialization;
- if (fixed_state.topology == Maxwell::PrimitiveTopology::Points) {
- float point_size;
- std::memcpy(&point_size, &fixed_state.point_size, sizeof(float));
- specialization.point_size = point_size;
- ASSERT(point_size != 0.0f);
- }
- for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
- const auto& attribute = fixed_state.attributes[i];
- specialization.enabled_attributes[i] = attribute.enabled.Value() != 0;
- specialization.attribute_types[i] = attribute.Type();
- }
- specialization.ndc_minus_one_to_one = fixed_state.ndc_minus_one_to_one;
- specialization.early_fragment_tests = fixed_state.early_z;
-
- // Alpha test
- specialization.alpha_test_func =
- FixedPipelineState::UnpackComparisonOp(fixed_state.alpha_test_func.Value());
- specialization.alpha_test_ref = Common::BitCast<float>(fixed_state.alpha_test_ref);
-
- SPIRVProgram program;
- std::vector<VkDescriptorSetLayoutBinding> bindings;
+std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
+ GraphicsEnvironments environments;
+ GetGraphicsEnvironments(environments, graphics_key.unique_hashes);
- for (std::size_t index = 1; index < Maxwell::MaxShaderProgram; ++index) {
- const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
- // Skip stages that are not enabled
- if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
- continue;
- }
- const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
- const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
- Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
-
- const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
- const ShaderType program_type = GetShaderType(program_enum);
- const auto& entries = shader->GetEntries();
- program[stage] = {
- Decompile(device, shader->GetIR(), program_type, shader->GetRegistry(), specialization),
- entries,
- };
-
- const u32 old_binding = specialization.base_binding;
- specialization.base_binding =
- FillDescriptorLayout(entries, bindings, program_enum, specialization.base_binding);
- ASSERT(old_binding + entries.NumBindings() == specialization.base_binding);
+ main_pools.ReleaseContents();
+ auto pipeline{CreateGraphicsPipeline(main_pools, graphics_key, environments.Span(), true)};
+ if (!pipeline || pipeline_cache_filename.empty()) {
+ return pipeline;
}
- return {std::move(program), std::move(bindings)};
-}
-
-template <VkDescriptorType descriptor_type, class Container>
-void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
- u32& offset, const Container& container) {
- static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
- const u32 count = static_cast<u32>(std::size(container));
-
- if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
- for (u32 i = 0; i < count; ++i) {
- const u32 num_samplers = container[i].size;
- template_entries.push_back({
- .dstBinding = binding,
- .dstArrayElement = 0,
- .descriptorCount = num_samplers,
- .descriptorType = descriptor_type,
- .offset = offset,
- .stride = entry_size,
- });
-
- ++binding;
- offset += num_samplers * entry_size;
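+    // Queue serialization on its own thread so writing the disk cache never stalls rendering.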
+ serialization_thread.QueueWork([this, key = graphics_key, envs = std::move(environments.envs)] {
+ boost::container::static_vector<const GenericEnvironment*, Maxwell::MaxShaderProgram>
+ env_ptrs;
+ for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+ if (key.unique_hashes[index] != 0) {
+ env_ptrs.push_back(&envs[index]);
+ }
}
- return;
- }
+ SerializePipeline(key, env_ptrs, pipeline_cache_filename, CACHE_VERSION);
+ });
+ return pipeline;
+}
- if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
- descriptor_type == STORAGE_TEXEL_BUFFER) {
- // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
- // Note: Fixed in driver Windows 443.24, Linux 440.66.15
- for (u32 i = 0; i < count; ++i) {
- template_entries.push_back({
- .dstBinding = binding + i,
- .dstArrayElement = 0,
- .descriptorCount = 1,
- .descriptorType = descriptor_type,
- .offset = static_cast<std::size_t>(offset + i * entry_size),
- .stride = entry_size,
- });
- }
- } else if (count > 0) {
- template_entries.push_back({
- .dstBinding = binding,
- .dstArrayElement = 0,
- .descriptorCount = count,
- .descriptorType = descriptor_type,
- .offset = offset,
- .stride = entry_size,
- });
+std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
+ const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
+ const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
+ const auto& qmd{kepler_compute.launch_description};
+ ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+ env.SetCachedSize(shader->size_bytes);
+
+ main_pools.ReleaseContents();
+ auto pipeline{CreateComputePipeline(main_pools, key, env, true)};
+ if (!pipeline || pipeline_cache_filename.empty()) {
+ return pipeline;
}
- offset += count * entry_size;
- binding += count;
+ serialization_thread.QueueWork([this, key, env = std::move(env)] {
+ SerializePipeline(key, std::array<const GenericEnvironment*, 1>{&env},
+ pipeline_cache_filename, CACHE_VERSION);
+ });
+ return pipeline;
}
-void FillDescriptorUpdateTemplateEntries(
- const ShaderEntries& entries, u32& binding, u32& offset,
- std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
- AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
- AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
- AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
- AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
- AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
- AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
+std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
+ ShaderPools& pools, const ComputePipelineCacheKey& key, Shader::Environment& env,
+ bool build_in_parallel) try {
+ LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());
+
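+    // Compute shaders have no graphics program header, so the CFG starts at the entry point.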
+ Shader::Maxwell::Flow::CFG cfg{env, pools.flow_block, env.StartAddress()};
+ auto program{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
+ const std::vector<u32> code{EmitSPIRV(profile, program)};
+ device.SaveShader(code);
+ vk::ShaderModule spv_module{BuildShader(device, code)};
+ if (device.HasDebuggingToolAttached()) {
+ const auto name{fmt::format("Shader {:016x}", key.unique_hash)};
+ spv_module.SetObjectNameEXT(name.c_str());
+ }
+ Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
+ return std::make_unique<ComputePipeline>(device, descriptor_pool, update_descriptor_queue,
+ thread_worker, &shader_notify, program.info,
+ std::move(spv_module));
+
+} catch (const Shader::Exception& exception) {
+ LOG_ERROR(Render_Vulkan, "{}", exception.what());
+ return nullptr;
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 89d635a3d..efe5a7ed8 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -6,24 +6,28 @@
#include <array>
#include <cstddef>
+#include <filesystem>
+#include <iosfwd>
#include <memory>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
-#include <boost/functional/hash.hpp>
-
#include "common/common_types.h"
-#include "video_core/engines/const_buffer_engine_interface.h"
+#include "common/thread_worker.h"
+#include "shader_recompiler/frontend/ir/basic_block.h"
+#include "shader_recompiler/frontend/ir/value.h"
+#include "shader_recompiler/frontend/maxwell/control_flow.h"
+#include "shader_recompiler/host_translate_info.h"
+#include "shader_recompiler/object_pool.h"
+#include "shader_recompiler/profile.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
-#include "video_core/shader/async_shaders.h"
-#include "video_core/shader/memory_util.h"
-#include "video_core/shader/registry.h"
-#include "video_core/shader/shader_ir.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/shader_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -31,23 +35,24 @@ namespace Core {
class System;
}
-namespace Vulkan {
+namespace Shader::IR {
+struct Program;
+}
-class Device;
-class RasterizerVulkan;
-class VKComputePipeline;
-class VKDescriptorPool;
-class VKScheduler;
-class VKUpdateDescriptorQueue;
+namespace VideoCore {
+class ShaderNotify;
+}
+
+namespace Vulkan {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct ComputePipelineCacheKey {
- GPUVAddr shader;
+ u64 unique_hash;
u32 shared_memory_size;
std::array<u32, 3> workgroup_size;
- std::size_t Hash() const noexcept;
+ size_t Hash() const noexcept;
bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
@@ -64,15 +69,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
namespace std {
template <>
-struct hash<Vulkan::GraphicsPipelineCacheKey> {
- std::size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
- return k.Hash();
- }
-};
-
-template <>
struct hash<Vulkan::ComputePipelineCacheKey> {
- std::size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
+ size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
return k.Hash();
}
};
@@ -81,94 +79,90 @@ struct hash<Vulkan::ComputePipelineCacheKey> {
namespace Vulkan {
-class Shader {
-public:
- explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine_,
- Tegra::Engines::ShaderType stage_, GPUVAddr gpu_addr, VAddr cpu_addr_,
- VideoCommon::Shader::ProgramCode program_code, u32 main_offset_);
- ~Shader();
-
- GPUVAddr GetGpuAddr() const {
- return gpu_addr;
- }
-
- VideoCommon::Shader::ShaderIR& GetIR() {
- return shader_ir;
- }
-
- const VideoCommon::Shader::ShaderIR& GetIR() const {
- return shader_ir;
- }
+class ComputePipeline;
+class Device;
+class DescriptorPool;
+class RasterizerVulkan;
+class RenderPassCache;
+class VKScheduler;
+class VKUpdateDescriptorQueue;
- const VideoCommon::Shader::Registry& GetRegistry() const {
- return registry;
- }
+using VideoCommon::ShaderInfo;
- const ShaderEntries& GetEntries() const {
- return entries;
+struct ShaderPools {
+ void ReleaseContents() {
+ flow_block.ReleaseContents();
+ block.ReleaseContents();
+ inst.ReleaseContents();
}
-private:
- GPUVAddr gpu_addr{};
- VideoCommon::Shader::ProgramCode program_code;
- VideoCommon::Shader::Registry registry;
- VideoCommon::Shader::ShaderIR shader_ir;
- ShaderEntries entries;
+ Shader::ObjectPool<Shader::IR::Inst> inst;
+ Shader::ObjectPool<Shader::IR::Block> block;
+ Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
};
-class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
+class PipelineCache : public VideoCommon::ShaderCache {
public:
- explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
- Tegra::Engines::Maxwell3D& maxwell3d,
- Tegra::Engines::KeplerCompute& kepler_compute,
- Tegra::MemoryManager& gpu_memory, const Device& device,
- VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
- VKUpdateDescriptorQueue& update_descriptor_queue);
- ~VKPipelineCache() override;
+ explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
+ Tegra::Engines::KeplerCompute& kepler_compute,
+ Tegra::MemoryManager& gpu_memory, const Device& device,
+ VKScheduler& scheduler, DescriptorPool& descriptor_pool,
+ VKUpdateDescriptorQueue& update_descriptor_queue,
+ RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
+ TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
+ ~PipelineCache();
+
+ [[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
- std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders();
+ [[nodiscard]] ComputePipeline* CurrentComputePipeline();
- VKGraphicsPipeline* GetGraphicsPipeline(const GraphicsPipelineCacheKey& key,
- u32 num_color_buffers,
- VideoCommon::Shader::AsyncShaders& async_shaders);
+ void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
+ const VideoCore::DiskResourceLoadCallback& callback);
- VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key);
+private:
+ [[nodiscard]] GraphicsPipeline* CurrentGraphicsPipelineSlowPath();
- void EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline);
+ [[nodiscard]] GraphicsPipeline* BuiltPipeline(GraphicsPipeline* pipeline) const noexcept;
-protected:
- void OnShaderRemoval(Shader* shader) final;
+ std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline();
-private:
- std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
- const FixedPipelineState& fixed_state);
+ std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline(
+ ShaderPools& pools, const GraphicsPipelineCacheKey& key,
+ std::span<Shader::Environment* const> envs, bool build_in_parallel);
- Tegra::GPU& gpu;
- Tegra::Engines::Maxwell3D& maxwell3d;
- Tegra::Engines::KeplerCompute& kepler_compute;
- Tegra::MemoryManager& gpu_memory;
+ std::unique_ptr<ComputePipeline> CreateComputePipeline(const ComputePipelineCacheKey& key,
+ const ShaderInfo* shader);
+
+ std::unique_ptr<ComputePipeline> CreateComputePipeline(ShaderPools& pools,
+ const ComputePipelineCacheKey& key,
+ Shader::Environment& env,
+ bool build_in_parallel);
const Device& device;
VKScheduler& scheduler;
- VKDescriptorPool& descriptor_pool;
+ DescriptorPool& descriptor_pool;
VKUpdateDescriptorQueue& update_descriptor_queue;
+ RenderPassCache& render_pass_cache;
+ BufferCache& buffer_cache;
+ TextureCache& texture_cache;
+ VideoCore::ShaderNotify& shader_notify;
+ bool use_asynchronous_shaders{};
- std::unique_ptr<Shader> null_shader;
- std::unique_ptr<Shader> null_kernel;
+ GraphicsPipelineCacheKey graphics_key{};
+ GraphicsPipeline* current_pipeline{};
- std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
+ std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<ComputePipeline>> compute_cache;
+ std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<GraphicsPipeline>> graphics_cache;
- GraphicsPipelineCacheKey last_graphics_key;
- VKGraphicsPipeline* last_graphics_pipeline = nullptr;
+ ShaderPools main_pools;
- std::mutex pipeline_cache;
- std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<VKGraphicsPipeline>>
- graphics_cache;
- std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<VKComputePipeline>> compute_cache;
-};
+ Shader::Profile profile;
+ Shader::HostTranslateInfo host_info;
-void FillDescriptorUpdateTemplateEntries(
- const ShaderEntries& entries, u32& binding, u32& offset,
- std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);
+ std::filesystem::path pipeline_cache_filename;
+
+ Common::ThreadWorker workers;
+ Common::ThreadWorker serialization_thread;
+};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 7cadd5147..c9cb32d71 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -114,14 +114,10 @@ void HostCounter::EndQuery() {
}
u64 HostCounter::BlockingQuery() const {
- if (tick >= cache.GetScheduler().CurrentTick()) {
- cache.GetScheduler().Flush();
- }
-
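+    // Waiting on the query's tick guarantees the results are available, making WAIT_BIT
+    // unnecessary below.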
+ cache.GetScheduler().Wait(tick);
u64 data;
const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
- query.first, query.second, 1, sizeof(data), &data, sizeof(data),
- VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+ query.first, query.second, 1, sizeof(data), &data, sizeof(data), VK_QUERY_RESULT_64_BIT);
switch (query_result) {
case VK_SUCCESS:
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index f57c15b37..c7a07fdd8 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -24,7 +24,6 @@
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
-#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -55,11 +54,10 @@ struct DrawParams {
u32 num_instances;
u32 base_vertex;
u32 num_vertices;
+ u32 first_index;
bool is_indexed;
};
-constexpr auto COMPUTE_SHADER_INDEX = static_cast<size_t>(Tegra::Engines::ShaderType::Compute);
-
VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t index) {
const auto& src = regs.viewport_transform[index];
const float width = src.scale_x * 2.0f;
@@ -97,118 +95,6 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index) {
return scissor;
}
-std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
- const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders) {
- std::array<GPUVAddr, Maxwell::MaxShaderProgram> addresses;
- for (size_t i = 0; i < std::size(addresses); ++i) {
- addresses[i] = shaders[i] ? shaders[i]->GetGpuAddr() : 0;
- }
- return addresses;
-}
-
-struct TextureHandle {
- constexpr TextureHandle(u32 data, bool via_header_index) {
- const Tegra::Texture::TextureHandle handle{data};
- image = handle.tic_id;
- sampler = via_header_index ? image : handle.tsc_id.Value();
- }
-
- u32 image;
- u32 sampler;
-};
-
-template <typename Engine, typename Entry>
-TextureHandle GetTextureInfo(const Engine& engine, bool via_header_index, const Entry& entry,
- size_t stage, size_t index = 0) {
- const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage);
- if constexpr (std::is_same_v<Entry, SamplerEntry>) {
- if (entry.is_separated) {
- const u32 buffer_1 = entry.buffer;
- const u32 buffer_2 = entry.secondary_buffer;
- const u32 offset_1 = entry.offset;
- const u32 offset_2 = entry.secondary_offset;
- const u32 handle_1 = engine.AccessConstBuffer32(shader_type, buffer_1, offset_1);
- const u32 handle_2 = engine.AccessConstBuffer32(shader_type, buffer_2, offset_2);
- return TextureHandle(handle_1 | handle_2, via_header_index);
- }
- }
- if (entry.is_bindless) {
- const u32 raw = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
- return TextureHandle(raw, via_header_index);
- }
- const u32 buffer = engine.GetBoundBuffer();
- const u64 offset = (entry.offset + index) * sizeof(u32);
- return TextureHandle(engine.AccessConstBuffer32(shader_type, buffer, offset), via_header_index);
-}
-
-ImageViewType ImageViewTypeFromEntry(const SamplerEntry& entry) {
- if (entry.is_buffer) {
- return ImageViewType::e2D;
- }
- switch (entry.type) {
- case Tegra::Shader::TextureType::Texture1D:
- return entry.is_array ? ImageViewType::e1DArray : ImageViewType::e1D;
- case Tegra::Shader::TextureType::Texture2D:
- return entry.is_array ? ImageViewType::e2DArray : ImageViewType::e2D;
- case Tegra::Shader::TextureType::Texture3D:
- return ImageViewType::e3D;
- case Tegra::Shader::TextureType::TextureCube:
- return entry.is_array ? ImageViewType::CubeArray : ImageViewType::Cube;
- }
- UNREACHABLE();
- return ImageViewType::e2D;
-}
-
-ImageViewType ImageViewTypeFromEntry(const ImageEntry& entry) {
- switch (entry.type) {
- case Tegra::Shader::ImageType::Texture1D:
- return ImageViewType::e1D;
- case Tegra::Shader::ImageType::Texture1DArray:
- return ImageViewType::e1DArray;
- case Tegra::Shader::ImageType::Texture2D:
- return ImageViewType::e2D;
- case Tegra::Shader::ImageType::Texture2DArray:
- return ImageViewType::e2DArray;
- case Tegra::Shader::ImageType::Texture3D:
- return ImageViewType::e3D;
- case Tegra::Shader::ImageType::TextureBuffer:
- return ImageViewType::Buffer;
- }
- UNREACHABLE();
- return ImageViewType::e2D;
-}
-
-void PushImageDescriptors(const ShaderEntries& entries, TextureCache& texture_cache,
- VKUpdateDescriptorQueue& update_descriptor_queue,
- ImageViewId*& image_view_id_ptr, VkSampler*& sampler_ptr) {
- for ([[maybe_unused]] const auto& entry : entries.uniform_texels) {
- const ImageViewId image_view_id = *image_view_id_ptr++;
- const ImageView& image_view = texture_cache.GetImageView(image_view_id);
- update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
- }
- for (const auto& entry : entries.samplers) {
- for (size_t i = 0; i < entry.size; ++i) {
- const VkSampler sampler = *sampler_ptr++;
- const ImageViewId image_view_id = *image_view_id_ptr++;
- const ImageView& image_view = texture_cache.GetImageView(image_view_id);
- const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
- update_descriptor_queue.AddSampledImage(handle, sampler);
- }
- }
- for ([[maybe_unused]] const auto& entry : entries.storage_texels) {
- const ImageViewId image_view_id = *image_view_id_ptr++;
- const ImageView& image_view = texture_cache.GetImageView(image_view_id);
- update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
- }
- for (const auto& entry : entries.images) {
- // TODO: Mark as modified
- const ImageViewId image_view_id = *image_view_id_ptr++;
- const ImageView& image_view = texture_cache.GetImageView(image_view_id);
- const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
- update_descriptor_queue.AddImage(handle);
- }
-}
-
DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced,
bool is_indexed) {
DrawParams params{
@@ -216,6 +102,7 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
.num_instances = is_instanced ? num_instances : 1,
.base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first,
.num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count,
+ .first_index = is_indexed ? regs.index_array.first : 0,
.is_indexed = is_indexed,
};
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
@@ -243,21 +130,21 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
blit_image(device, scheduler, state_tracker, descriptor_pool),
astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
memory_allocator),
- texture_cache_runtime{device, scheduler, memory_allocator,
- staging_pool, blit_image, astc_decoder_pass},
+ render_pass_cache(device), texture_cache_runtime{device, scheduler,
+ memory_allocator, staging_pool,
+ blit_image, astc_decoder_pass,
+ render_pass_cache},
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
update_descriptor_queue, descriptor_pool),
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
- pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
- descriptor_pool, update_descriptor_queue),
+ pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
+ descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
+ texture_cache, gpu.ShaderNotify()),
query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
- wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
+ wfi_event(device.GetLogical().CreateEvent()) {
scheduler.SetQueryCache(query_cache);
- if (device.UseAsynchronousShaders()) {
- async_shaders.AllocateWorkers();
- }
}
RasterizerVulkan::~RasterizerVulkan() = default;
@@ -270,53 +157,30 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
- graphics_key.fixed_state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported());
-
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-
- texture_cache.SynchronizeGraphicsDescriptors();
- texture_cache.UpdateRenderTargets(false);
-
- const auto shaders = pipeline_cache.GetShaders();
- graphics_key.shaders = GetShaderAddresses(shaders);
-
- SetupShaderDescriptors(shaders, is_indexed);
-
- const Framebuffer* const framebuffer = texture_cache.GetFramebuffer();
- graphics_key.renderpass = framebuffer->RenderPass();
-
- VKGraphicsPipeline* const pipeline = pipeline_cache.GetGraphicsPipeline(
- graphics_key, framebuffer->NumColorBuffers(), async_shaders);
- if (pipeline == nullptr || pipeline->GetHandle() == VK_NULL_HANDLE) {
- // Async graphics pipeline was not ready.
+ GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
+ if (!pipeline) {
return;
}
+ std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+ pipeline->Configure(is_indexed);
BeginTransformFeedback();
- scheduler.RequestRenderpass(framebuffer);
- scheduler.BindGraphicsPipeline(pipeline->GetHandle());
UpdateDynamicStates();
- const auto& regs = maxwell3d.regs;
- const u32 num_instances = maxwell3d.mme_draw.instance_count;
- const DrawParams draw_params = MakeDrawParams(regs, num_instances, is_instanced, is_indexed);
- const VkPipelineLayout pipeline_layout = pipeline->GetLayout();
- const VkDescriptorSet descriptor_set = pipeline->CommitDescriptorSet();
- scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
- if (descriptor_set) {
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
- DESCRIPTOR_SET, descriptor_set, nullptr);
- }
+ const auto& regs{maxwell3d.regs};
+ const u32 num_instances{maxwell3d.mme_draw.instance_count};
+ const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
+ scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
if (draw_params.is_indexed) {
- cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, 0,
- draw_params.base_vertex, draw_params.base_instance);
+ cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
+ draw_params.first_index, draw_params.base_vertex,
+ draw_params.base_instance);
} else {
cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
draw_params.base_vertex, draw_params.base_instance);
}
});
-
EndTransformFeedback();
}
@@ -326,6 +190,7 @@ void RasterizerVulkan::Clear() {
if (!maxwell3d.ShouldExecute()) {
return;
}
+ FlushWork();
query_cache.UpdateCounters();
@@ -395,73 +260,20 @@ void RasterizerVulkan::Clear() {
});
}
-void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
- MICROPROFILE_SCOPE(Vulkan_Compute);
-
- query_cache.UpdateCounters();
+void RasterizerVulkan::DispatchCompute() {
+ FlushWork();
- const auto& launch_desc = kepler_compute.launch_description;
- auto& pipeline = pipeline_cache.GetComputePipeline({
- .shader = code_addr,
- .shared_memory_size = launch_desc.shared_alloc,
- .workgroup_size{
- launch_desc.block_dim_x,
- launch_desc.block_dim_y,
- launch_desc.block_dim_z,
- },
- });
+ ComputePipeline* const pipeline{pipeline_cache.CurrentComputePipeline()};
+ if (!pipeline) {
+ return;
+ }
+ std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
+ pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache);
- // Compute dispatches can't be executed inside a renderpass
+ const auto& qmd{kepler_compute.launch_description};
+ const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
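+    // Compute dispatches can't be executed inside a render pass.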
scheduler.RequestOutsideRenderPassOperationContext();
-
- image_view_indices.clear();
- sampler_handles.clear();
-
- std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-
- const auto& entries = pipeline.GetEntries();
- buffer_cache.SetEnabledComputeUniformBuffers(entries.enabled_uniform_buffers);
- buffer_cache.UnbindComputeStorageBuffers();
- u32 ssbo_index = 0;
- for (const auto& buffer : entries.global_buffers) {
- buffer_cache.BindComputeStorageBuffer(ssbo_index, buffer.cbuf_index, buffer.cbuf_offset,
- buffer.is_written);
- ++ssbo_index;
- }
- buffer_cache.UpdateComputeBuffers();
-
- texture_cache.SynchronizeComputeDescriptors();
-
- SetupComputeUniformTexels(entries);
- SetupComputeTextures(entries);
- SetupComputeStorageTexels(entries);
- SetupComputeImages(entries);
-
- const std::span indices_span(image_view_indices.data(), image_view_indices.size());
- texture_cache.FillComputeImageViews(indices_span, image_view_ids);
-
- update_descriptor_queue.Acquire();
-
- buffer_cache.BindHostComputeBuffers();
-
- ImageViewId* image_view_id_ptr = image_view_ids.data();
- VkSampler* sampler_ptr = sampler_handles.data();
- PushImageDescriptors(entries, texture_cache, update_descriptor_queue, image_view_id_ptr,
- sampler_ptr);
-
- const VkPipeline pipeline_handle = pipeline.GetHandle();
- const VkPipelineLayout pipeline_layout = pipeline.GetLayout();
- const VkDescriptorSet descriptor_set = pipeline.CommitDescriptorSet();
- scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
- grid_z = launch_desc.grid_dim_z, pipeline_handle, pipeline_layout,
- descriptor_set](vk::CommandBuffer cmdbuf) {
- cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
- if (descriptor_set) {
- cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout,
- DESCRIPTOR_SET, descriptor_set, nullptr);
- }
- cmdbuf.Dispatch(grid_x, grid_y, grid_z);
- });
+ scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
}
void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
@@ -626,6 +438,7 @@ void RasterizerVulkan::WaitForIdle() {
void RasterizerVulkan::FragmentBarrier() {
// We already put barriers when a render pass finishes
+ scheduler.RequestOutsideRenderPassOperationContext();
}
void RasterizerVulkan::TiledCacheBarrier() {
@@ -633,10 +446,11 @@ void RasterizerVulkan::TiledCacheBarrier() {
}
void RasterizerVulkan::FlushCommands() {
- if (draw_counter > 0) {
- draw_counter = 0;
- scheduler.Flush();
+ if (draw_counter == 0) {
+ return;
}
+ draw_counter = 0;
+ scheduler.Flush();
}
void RasterizerVulkan::TickFrame() {
@@ -676,13 +490,18 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
if (!image_view) {
return false;
}
- screen_info.image_view = image_view->Handle(VideoCommon::ImageViewType::e2D);
+ screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D);
screen_info.width = image_view->size.width;
screen_info.height = image_view->size.height;
screen_info.is_srgb = VideoCore::Surface::IsPixelFormatSRGB(image_view->format);
return true;
}
+void RasterizerVulkan::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
+ const VideoCore::DiskResourceLoadCallback& callback) {
+ pipeline_cache.LoadDiskResources(title_id, stop_loading, callback);
+}
+
void RasterizerVulkan::FlushWork() {
static constexpr u32 DRAWS_TO_DISPATCH = 4096;
@@ -691,13 +510,11 @@ void RasterizerVulkan::FlushWork() {
if ((++draw_counter & 7) != 7) {
return;
}
-
if (draw_counter < DRAWS_TO_DISPATCH) {
// Send recorded tasks to the worker thread
scheduler.DispatchWork();
return;
}
-
// Otherwise (every certain number of draws) flush execution.
// This submits commands to the Vulkan driver.
scheduler.Flush();
@@ -716,52 +533,6 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
return buffer_cache.DMACopy(src_address, dest_address, amount);
}
-void RasterizerVulkan::SetupShaderDescriptors(
- const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders, bool is_indexed) {
- image_view_indices.clear();
- sampler_handles.clear();
- for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
- Shader* const shader = shaders[stage + 1];
- if (!shader) {
- continue;
- }
- const ShaderEntries& entries = shader->GetEntries();
- SetupGraphicsUniformTexels(entries, stage);
- SetupGraphicsTextures(entries, stage);
- SetupGraphicsStorageTexels(entries, stage);
- SetupGraphicsImages(entries, stage);
-
- buffer_cache.SetEnabledUniformBuffers(stage, entries.enabled_uniform_buffers);
- buffer_cache.UnbindGraphicsStorageBuffers(stage);
- u32 ssbo_index = 0;
- for (const auto& buffer : entries.global_buffers) {
- buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, buffer.cbuf_index,
- buffer.cbuf_offset, buffer.is_written);
- ++ssbo_index;
- }
- }
- const std::span indices_span(image_view_indices.data(), image_view_indices.size());
- buffer_cache.UpdateGraphicsBuffers(is_indexed);
- texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
-
- buffer_cache.BindHostGeometryBuffers(is_indexed);
-
- update_descriptor_queue.Acquire();
-
- ImageViewId* image_view_id_ptr = image_view_ids.data();
- VkSampler* sampler_ptr = sampler_handles.data();
- for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
- // Skip VertexA stage
- Shader* const shader = shaders[stage + 1];
- if (!shader) {
- continue;
- }
- buffer_cache.BindHostStageBuffers(stage);
- PushImageDescriptors(shader->GetEntries(), texture_cache, update_descriptor_queue,
- image_view_id_ptr, sampler_ptr);
- }
-}
-
void RasterizerVulkan::UpdateDynamicStates() {
auto& regs = maxwell3d.regs;
UpdateViewportsState(regs);
@@ -770,6 +541,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateBlendConstants(regs);
UpdateDepthBounds(regs);
UpdateStencilFaces(regs);
+ UpdateLineWidth(regs);
if (device.IsExtExtendedDynamicStateSupported()) {
UpdateCullMode(regs);
UpdateDepthBoundsTestEnable(regs);
@@ -779,6 +551,9 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateFrontFace(regs);
UpdateStencilOp(regs);
UpdateStencilTestEnable(regs);
+ if (device.IsExtVertexInputDynamicStateSupported()) {
+ UpdateVertexInput(regs);
+ }
}
}
@@ -810,89 +585,6 @@ void RasterizerVulkan::EndTransformFeedback() {
[](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
}
-void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, size_t stage) {
- const auto& regs = maxwell3d.regs;
- const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
- for (const auto& entry : entries.uniform_texels) {
- const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
- image_view_indices.push_back(handle.image);
- }
-}
-
-void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, size_t stage) {
- const auto& regs = maxwell3d.regs;
- const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
- for (const auto& entry : entries.samplers) {
- for (size_t index = 0; index < entry.size; ++index) {
- const TextureHandle handle =
- GetTextureInfo(maxwell3d, via_header_index, entry, stage, index);
- image_view_indices.push_back(handle.image);
-
- Sampler* const sampler = texture_cache.GetGraphicsSampler(handle.sampler);
- sampler_handles.push_back(sampler->Handle());
- }
- }
-}
-
-void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, size_t stage) {
- const auto& regs = maxwell3d.regs;
- const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
- for (const auto& entry : entries.storage_texels) {
- const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
- image_view_indices.push_back(handle.image);
- }
-}
-
-void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, size_t stage) {
- const auto& regs = maxwell3d.regs;
- const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
- for (const auto& entry : entries.images) {
- const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
- image_view_indices.push_back(handle.image);
- }
-}
-
-void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
- const bool via_header_index = kepler_compute.launch_description.linked_tsc;
- for (const auto& entry : entries.uniform_texels) {
- const TextureHandle handle =
- GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
- image_view_indices.push_back(handle.image);
- }
-}
-
-void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
- const bool via_header_index = kepler_compute.launch_description.linked_tsc;
- for (const auto& entry : entries.samplers) {
- for (size_t index = 0; index < entry.size; ++index) {
- const TextureHandle handle = GetTextureInfo(kepler_compute, via_header_index, entry,
- COMPUTE_SHADER_INDEX, index);
- image_view_indices.push_back(handle.image);
-
- Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
- sampler_handles.push_back(sampler->Handle());
- }
- }
-}
-
-void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
- const bool via_header_index = kepler_compute.launch_description.linked_tsc;
- for (const auto& entry : entries.storage_texels) {
- const TextureHandle handle =
- GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
- image_view_indices.push_back(handle.image);
- }
-}
-
-void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
- const bool via_header_index = kepler_compute.launch_description.linked_tsc;
- for (const auto& entry : entries.images) {
- const TextureHandle handle =
- GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
- image_view_indices.push_back(handle.image);
- }
-}
-
void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchViewports()) {
return;
@@ -985,6 +677,14 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
}
}
+void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) {
+ if (!state_tracker.TouchLineWidth()) {
+ return;
+ }
+ const float width = regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased;
+ scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); });
+}
+
void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchCullMode()) {
return;
@@ -999,6 +699,11 @@ void RasterizerVulkan::UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Re
if (!state_tracker.TouchDepthBoundsTestEnable()) {
return;
}
+    bool enabled = regs.depth_bounds_enable;
+    if (enabled && !device.IsDepthBoundsSupported()) {
+        LOG_WARNING(Render_Vulkan, "Depth bounds test is enabled but not supported");
+        enabled = false;
+    }
-    scheduler.Record([enable = regs.depth_bounds_enable](vk::CommandBuffer cmdbuf) {
-        cmdbuf.SetDepthBoundsTestEnableEXT(enable);
+    scheduler.Record([enabled](vk::CommandBuffer cmdbuf) {
+        cmdbuf.SetDepthBoundsTestEnableEXT(enabled);
});
@@ -1086,4 +791,62 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
});
}
+void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
+ auto& dirty{maxwell3d.dirty.flags};
+ if (!dirty[Dirty::VertexInput]) {
+ return;
+ }
+ dirty[Dirty::VertexInput] = false;
+
+ boost::container::static_vector<VkVertexInputBindingDescription2EXT, 32> bindings;
+ boost::container::static_vector<VkVertexInputAttributeDescription2EXT, 32> attributes;
+
+    // There seems to be a bug in Nvidia's driver where updating only the higher attributes
+    // leaves the lower ones in a dirty state. Track the highest dirty attribute and update
+    // all attributes up to and including that one.
+ size_t highest_dirty_attr{};
+ for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
+ if (dirty[Dirty::VertexAttribute0 + index]) {
+ highest_dirty_attr = index;
+ }
+ }
+    for (size_t index = 0; index <= highest_dirty_attr; ++index) {
+ const Maxwell::VertexAttribute attribute{regs.vertex_attrib_format[index]};
+ const u32 binding{attribute.buffer};
+ dirty[Dirty::VertexAttribute0 + index] = false;
+ dirty[Dirty::VertexBinding0 + static_cast<size_t>(binding)] = true;
+ if (!attribute.constant) {
+ attributes.push_back({
+ .sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT,
+ .pNext = nullptr,
+ .location = static_cast<u32>(index),
+ .binding = binding,
+ .format = MaxwellToVK::VertexFormat(attribute.type, attribute.size),
+ .offset = attribute.offset,
+ });
+ }
+ }
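+    // Rebuild every binding that was dirtied directly or through one of its attributes.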
+ for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
+ if (!dirty[Dirty::VertexBinding0 + index]) {
+ continue;
+ }
+ dirty[Dirty::VertexBinding0 + index] = false;
+
+ const u32 binding{static_cast<u32>(index)};
+ const auto& input_binding{regs.vertex_array[binding]};
+ const bool is_instanced{regs.instanced_arrays.IsInstancingEnabled(binding)};
+ bindings.push_back({
+ .sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT,
+ .pNext = nullptr,
+ .binding = binding,
+ .stride = input_binding.stride,
+ .inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX,
+ .divisor = is_instanced ? input_binding.divisor : 1,
+ });
+ }
+ scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetVertexInputEXT(bindings, attributes);
+ });
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 2065209be..866827247 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -21,14 +21,13 @@
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_fence_manager.h"
-#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
-#include "video_core/shader/async_shaders.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -73,7 +72,7 @@ public:
void Draw(bool is_indexed, bool is_instanced) override;
void Clear() override;
- void DispatchCompute(GPUVAddr code_addr) override;
+ void DispatchCompute() override;
void ResetCounter(VideoCore::QueryType type) override;
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
@@ -102,19 +101,8 @@ public:
Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
-
- VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
- return async_shaders;
- }
-
- const VideoCommon::Shader::AsyncShaders& GetAsyncShaders() const {
- return async_shaders;
- }
-
- /// Maximum supported size that a constbuffer can have in bytes.
- static constexpr size_t MaxConstbufferSize = 0x10000;
- static_assert(MaxConstbufferSize % (4 * sizeof(float)) == 0,
- "The maximum size of a constbuffer must be a multiple of the size of GLvec4");
+ void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
+ const VideoCore::DiskResourceLoadCallback& callback) override;
private:
static constexpr size_t MAX_TEXTURES = 192;
@@ -125,46 +113,19 @@ private:
void FlushWork();
- /// Setup descriptors in the graphics pipeline.
- void SetupShaderDescriptors(const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders,
- bool is_indexed);
-
void UpdateDynamicStates();
void BeginTransformFeedback();
void EndTransformFeedback();
- /// Setup uniform texels in the graphics pipeline.
- void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
-
- /// Setup textures in the graphics pipeline.
- void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
-
- /// Setup storage texels in the graphics pipeline.
- void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
-
- /// Setup images in the graphics pipeline.
- void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
-
- /// Setup texel buffers in the compute pipeline.
- void SetupComputeUniformTexels(const ShaderEntries& entries);
-
- /// Setup textures in the compute pipeline.
- void SetupComputeTextures(const ShaderEntries& entries);
-
- /// Setup storage texels in the compute pipeline.
- void SetupComputeStorageTexels(const ShaderEntries& entries);
-
- /// Setup images in the compute pipeline.
- void SetupComputeImages(const ShaderEntries& entries);
-
void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs);
+ void UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
@@ -175,6 +136,8 @@ private:
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
+ void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
+
Tegra::GPU& gpu;
Tegra::MemoryManager& gpu_memory;
Tegra::Engines::Maxwell3D& maxwell3d;
@@ -187,24 +150,22 @@ private:
VKScheduler& scheduler;
StagingBufferPool staging_pool;
- VKDescriptorPool descriptor_pool;
+ DescriptorPool descriptor_pool;
VKUpdateDescriptorQueue update_descriptor_queue;
BlitImageHelper blit_image;
ASTCDecoderPass astc_decoder_pass;
-
- GraphicsPipelineCacheKey graphics_key;
+ RenderPassCache render_pass_cache;
TextureCacheRuntime texture_cache_runtime;
TextureCache texture_cache;
BufferCacheRuntime buffer_cache_runtime;
BufferCache buffer_cache;
- VKPipelineCache pipeline_cache;
+ PipelineCache pipeline_cache;
VKQueryCache query_cache;
AccelerateDMA accelerate_dma;
VKFenceManager fence_manager;
vk::Event wfi_event;
- VideoCommon::Shader::AsyncShaders async_shaders;
boost::container::static_vector<u32, MAX_IMAGE_VIEWS> image_view_indices;
std::array<VideoCommon::ImageViewId, MAX_IMAGE_VIEWS> image_view_ids;
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
new file mode 100644
index 000000000..451ffe019
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
@@ -0,0 +1,96 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <unordered_map>
+
+#include <boost/container/static_vector.hpp>
+
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
+#include "video_core/surface.h"
+#include "video_core/vulkan_common/vulkan_device.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+namespace {
+using VideoCore::Surface::PixelFormat;
+
+VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat format,
+ VkSampleCountFlagBits samples) {
+ using MaxwellToVK::SurfaceFormat;
+ return {
+ .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
+ .format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
+ .samples = samples,
+ .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+ };
+}
+} // Anonymous namespace
+
+RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}
+
+VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
+ std::lock_guard lock{mutex};
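+    // A single lookup either returns the cached render pass or reserves a slot to fill below.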
+ const auto [pair, is_new] = cache.try_emplace(key);
+ if (!is_new) {
+ return *pair->second;
+ }
+ boost::container::static_vector<VkAttachmentDescription, 9> descriptions;
+ std::array<VkAttachmentReference, 8> references{};
+ u32 num_attachments{};
+ u32 num_colors{};
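+    // Compact the used color targets into consecutive attachment indices; unused render
+    // targets map to VK_ATTACHMENT_UNUSED.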
+ for (size_t index = 0; index < key.color_formats.size(); ++index) {
+ const PixelFormat format{key.color_formats[index]};
+ const bool is_valid{format != PixelFormat::Invalid};
+ references[index] = VkAttachmentReference{
+ .attachment = is_valid ? num_colors : VK_ATTACHMENT_UNUSED,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ };
+ if (is_valid) {
+ descriptions.push_back(AttachmentDescription(*device, format, key.samples));
+ num_attachments = static_cast<u32>(index + 1);
+ ++num_colors;
+ }
+ }
+ const bool has_depth{key.depth_format != PixelFormat::Invalid};
+ VkAttachmentReference depth_reference{};
+ if (key.depth_format != PixelFormat::Invalid) {
+ depth_reference = VkAttachmentReference{
+ .attachment = num_colors,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ };
+ descriptions.push_back(AttachmentDescription(*device, key.depth_format, key.samples));
+ }
+ const VkSubpassDescription subpass{
+ .flags = 0,
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .inputAttachmentCount = 0,
+ .pInputAttachments = nullptr,
+ .colorAttachmentCount = num_attachments,
+ .pColorAttachments = references.data(),
+ .pResolveAttachments = nullptr,
+ .pDepthStencilAttachment = has_depth ? &depth_reference : nullptr,
+ .preserveAttachmentCount = 0,
+ .pPreserveAttachments = nullptr,
+ };
+ pair->second = device->GetLogical().CreateRenderPass({
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .attachmentCount = static_cast<u32>(descriptions.size()),
+ .pAttachments = descriptions.empty() ? nullptr : descriptions.data(),
+ .subpassCount = 1,
+ .pSubpasses = &subpass,
+ .dependencyCount = 0,
+ .pDependencies = nullptr,
+ });
+ return *pair->second;
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.h b/src/video_core/renderer_vulkan/vk_render_pass_cache.h
new file mode 100644
index 000000000..eaa0ed775
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.h
@@ -0,0 +1,55 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <mutex>
+#include <unordered_map>
+
+#include "video_core/surface.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+
+struct RenderPassKey {
+ auto operator<=>(const RenderPassKey&) const noexcept = default;
+
+ std::array<VideoCore::Surface::PixelFormat, 8> color_formats;
+ VideoCore::Surface::PixelFormat depth_format;
+ VkSampleCountFlagBits samples;
+};
+
+} // namespace Vulkan
+
+namespace std {
+template <>
+struct hash<Vulkan::RenderPassKey> {
+ [[nodiscard]] size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
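+        // Mix the depth format and sample count into the high bits and the color formats
+        // into the low bits.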
+ size_t value = static_cast<size_t>(key.depth_format) << 48;
+ value ^= static_cast<size_t>(key.samples) << 52;
+ for (size_t i = 0; i < key.color_formats.size(); ++i) {
+ value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
+ }
+ return value;
+ }
+};
+} // namespace std
+
+namespace Vulkan {
+
+class Device;
+
+class RenderPassCache {
+public:
+ explicit RenderPassCache(const Device& device_);
+
+ VkRenderPass Get(const RenderPassKey& key);
+
+private:
+ const Device* device{};
+ std::unordered_map<RenderPassKey, vk::RenderPass> cache;
+ std::mutex mutex;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_pool.cpp b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
index a8bf7bda8..2dd514968 100644
--- a/src/video_core/renderer_vulkan/vk_resource_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_pool.cpp
@@ -10,18 +10,16 @@
namespace Vulkan {
ResourcePool::ResourcePool(MasterSemaphore& master_semaphore_, size_t grow_step_)
- : master_semaphore{master_semaphore_}, grow_step{grow_step_} {}
-
-ResourcePool::~ResourcePool() = default;
+ : master_semaphore{&master_semaphore_}, grow_step{grow_step_} {}
size_t ResourcePool::CommitResource() {
// Refresh semaphore to query updated results
- master_semaphore.Refresh();
- const u64 gpu_tick = master_semaphore.KnownGpuTick();
+ master_semaphore->Refresh();
+ const u64 gpu_tick = master_semaphore->KnownGpuTick();
const auto search = [this, gpu_tick](size_t begin, size_t end) -> std::optional<size_t> {
for (size_t iterator = begin; iterator < end; ++iterator) {
if (gpu_tick >= ticks[iterator]) {
- ticks[iterator] = master_semaphore.CurrentTick();
+ ticks[iterator] = master_semaphore->CurrentTick();
return iterator;
}
}
@@ -36,7 +34,7 @@ size_t ResourcePool::CommitResource() {
// Both searches failed, the pool is full; handle it.
const size_t free_resource = ManageOverflow();
- ticks[free_resource] = master_semaphore.CurrentTick();
+ ticks[free_resource] = master_semaphore->CurrentTick();
found = free_resource;
}
}
diff --git a/src/video_core/renderer_vulkan/vk_resource_pool.h b/src/video_core/renderer_vulkan/vk_resource_pool.h
index 9d0bb3b4d..f0b80ad59 100644
--- a/src/video_core/renderer_vulkan/vk_resource_pool.h
+++ b/src/video_core/renderer_vulkan/vk_resource_pool.h
@@ -18,8 +18,16 @@ class MasterSemaphore;
*/
class ResourcePool {
public:
+ explicit ResourcePool() = default;
explicit ResourcePool(MasterSemaphore& master_semaphore, size_t grow_step);
- virtual ~ResourcePool();
+
+ virtual ~ResourcePool() = default;
+
+ ResourcePool& operator=(ResourcePool&&) noexcept = default;
+ ResourcePool(ResourcePool&&) noexcept = default;
+
+ ResourcePool& operator=(const ResourcePool&) = default;
+ ResourcePool(const ResourcePool&) = default;
protected:
size_t CommitResource();
@@ -34,7 +42,7 @@ private:
/// Allocates a new page of resources.
void Grow();
- MasterSemaphore& master_semaphore;
+ MasterSemaphore* master_semaphore{};
size_t grow_step = 0; ///< Number of new resources created after an overflow
size_t hint_iterator = 0; ///< Hint to where the next free resources is likely to be found
std::vector<u64> ticks; ///< Ticks for each resource
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index f35c120b0..4840962de 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -31,7 +31,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
command->~Command();
command = next;
}
-
+ submit = false;
command_offset = 0;
first = nullptr;
last = nullptr;
@@ -42,13 +42,16 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
master_semaphore{std::make_unique<MasterSemaphore>(device)},
command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
AcquireNewChunk();
- AllocateNewContext();
+ AllocateWorkerCommandBuffer();
worker_thread = std::thread(&VKScheduler::WorkerThread, this);
}
VKScheduler::~VKScheduler() {
- quit = true;
- cv.notify_all();
+ {
+ std::lock_guard lock{work_mutex};
+ quit = true;
+ }
+ work_cv.notify_all();
worker_thread.join();
}
@@ -60,6 +63,7 @@ void VKScheduler::Flush(VkSemaphore semaphore) {
void VKScheduler::Finish(VkSemaphore semaphore) {
const u64 presubmit_tick = CurrentTick();
SubmitExecution(semaphore);
+ WaitWorker();
Wait(presubmit_tick);
AllocateNewContext();
}
@@ -68,20 +72,19 @@ void VKScheduler::WaitWorker() {
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
DispatchWork();
- bool finished = false;
- do {
- cv.notify_all();
- std::unique_lock lock{mutex};
- finished = chunk_queue.Empty();
- } while (!finished);
+ std::unique_lock lock{work_mutex};
+ wait_cv.wait(lock, [this] { return work_queue.empty(); });
}
void VKScheduler::DispatchWork() {
if (chunk->Empty()) {
return;
}
- chunk_queue.Push(std::move(chunk));
- cv.notify_all();
+ {
+ std::lock_guard lock{work_mutex};
+ work_queue.push(std::move(chunk));
+ }
+ work_cv.notify_one();
AcquireNewChunk();
}
@@ -124,93 +127,101 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
EndRenderPass();
}
-void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
+bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
if (state.graphics_pipeline == pipeline) {
- return;
+ return false;
}
state.graphics_pipeline = pipeline;
- Record([pipeline](vk::CommandBuffer cmdbuf) {
- cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
- });
+ return true;
}
void VKScheduler::WorkerThread() {
- Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
- std::unique_lock lock{mutex};
+ Common::SetCurrentThreadName("yuzu:VulkanWorker");
do {
- cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
- if (quit) {
- continue;
+ if (work_queue.empty()) {
+ wait_cv.notify_all();
+ }
+ std::unique_ptr<CommandChunk> work;
+ {
+ std::unique_lock lock{work_mutex};
+ work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
+ if (quit) {
+ continue;
+ }
+ work = std::move(work_queue.front());
+ work_queue.pop();
+ }
+ const bool has_submit = work->HasSubmit();
+ work->ExecuteAll(current_cmdbuf);
+ if (has_submit) {
+ AllocateWorkerCommandBuffer();
}
- auto extracted_chunk = std::move(chunk_queue.Front());
- chunk_queue.Pop();
- extracted_chunk->ExecuteAll(current_cmdbuf);
- chunk_reserve.Push(std::move(extracted_chunk));
+ std::lock_guard reserve_lock{reserve_mutex};
+ chunk_reserve.push_back(std::move(work));
} while (!quit);
}
+void VKScheduler::AllocateWorkerCommandBuffer() {
+ current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
+ current_cmdbuf.Begin({
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ .pNext = nullptr,
+ .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ .pInheritanceInfo = nullptr,
+ });
+}
+
void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
EndPendingOperations();
InvalidateState();
- WaitWorker();
- std::unique_lock lock{mutex};
+ const u64 signal_value = master_semaphore->NextTick();
+ Record([semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
+ cmdbuf.End();
- current_cmdbuf.End();
+ const u32 num_signal_semaphores = semaphore ? 2U : 1U;
- const VkSemaphore timeline_semaphore = master_semaphore->Handle();
- const u32 num_signal_semaphores = semaphore ? 2U : 1U;
+ const u64 wait_value = signal_value - 1;
+ const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
- const u64 signal_value = master_semaphore->CurrentTick();
- const u64 wait_value = signal_value - 1;
- const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ const VkSemaphore timeline_semaphore = master_semaphore->Handle();
+ const std::array signal_values{signal_value, u64(0)};
+ const std::array signal_semaphores{timeline_semaphore, semaphore};
- master_semaphore->NextTick();
-
- const std::array signal_values{signal_value, u64(0)};
- const std::array signal_semaphores{timeline_semaphore, semaphore};
-
- const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
- .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
- .pNext = nullptr,
- .waitSemaphoreValueCount = 1,
- .pWaitSemaphoreValues = &wait_value,
- .signalSemaphoreValueCount = num_signal_semaphores,
- .pSignalSemaphoreValues = signal_values.data(),
- };
- const VkSubmitInfo submit_info{
- .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
- .pNext = &timeline_si,
- .waitSemaphoreCount = 1,
- .pWaitSemaphores = &timeline_semaphore,
- .pWaitDstStageMask = &wait_stage_mask,
- .commandBufferCount = 1,
- .pCommandBuffers = current_cmdbuf.address(),
- .signalSemaphoreCount = num_signal_semaphores,
- .pSignalSemaphores = signal_semaphores.data(),
- };
- switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
- case VK_SUCCESS:
- break;
- case VK_ERROR_DEVICE_LOST:
- device.ReportLoss();
- [[fallthrough]];
- default:
- vk::Check(result);
- }
+ const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
+ .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
+ .pNext = nullptr,
+ .waitSemaphoreValueCount = 1,
+ .pWaitSemaphoreValues = &wait_value,
+ .signalSemaphoreValueCount = num_signal_semaphores,
+ .pSignalSemaphoreValues = signal_values.data(),
+ };
+ const VkSubmitInfo submit_info{
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .pNext = &timeline_si,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &timeline_semaphore,
+ .pWaitDstStageMask = &wait_stage_mask,
+ .commandBufferCount = 1,
+ .pCommandBuffers = cmdbuf.address(),
+ .signalSemaphoreCount = num_signal_semaphores,
+ .pSignalSemaphores = signal_semaphores.data(),
+ };
+ switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
+ case VK_SUCCESS:
+ break;
+ case VK_ERROR_DEVICE_LOST:
+ device.ReportLoss();
+ [[fallthrough]];
+ default:
+ vk::Check(result);
+ }
+ });
+ chunk->MarkSubmit();
+ DispatchWork();
}
void VKScheduler::AllocateNewContext() {
- std::unique_lock lock{mutex};
-
- current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
- current_cmdbuf.Begin({
- .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
- .pNext = nullptr,
- .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
- .pInheritanceInfo = nullptr,
- });
-
// Enable counters once again. These are disabled when a command buffer is finished.
if (query_cache) {
query_cache->UpdateCounters();
@@ -265,12 +276,13 @@ void VKScheduler::EndRenderPass() {
}
void VKScheduler::AcquireNewChunk() {
- if (chunk_reserve.Empty()) {
+ std::lock_guard lock{reserve_mutex};
+ if (chunk_reserve.empty()) {
chunk = std::make_unique<CommandChunk>();
return;
}
- chunk = std::move(chunk_reserve.Front());
- chunk_reserve.Pop();
+ chunk = std::move(chunk_reserve.back());
+ chunk_reserve.pop_back();
}
} // namespace Vulkan
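
Aside (not part of the patch): the scheduler drops Common::SPSCQueue in favour of a plain std::queue guarded by work_mutex and a pair of condition variables, with executed chunks recycled through the mutex-protected chunk_reserve. A minimal sketch of that hand-off pattern, reduced to a hypothetical queue of std::function payloads:

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    class WorkerQueue {
    public:
        WorkerQueue() : worker{&WorkerQueue::Run, this} {}

        ~WorkerQueue() {
            {
                std::lock_guard lock{mutex};
                quit = true;
            }
            cv.notify_all(); // wake the worker so it can observe 'quit'
            worker.join();
        }

        void Push(std::function<void()> work) {
            {
                std::lock_guard lock{mutex};
                queue.push(std::move(work));
            }
            cv.notify_one(); // single consumer, so notify_one suffices
        }

    private:
        void Run() {
            for (;;) {
                std::function<void()> work;
                {
                    std::unique_lock lock{mutex};
                    cv.wait(lock, [this] { return !queue.empty() || quit; });
                    if (quit) {
                        return;
                    }
                    work = std::move(queue.front());
                    queue.pop();
                }
                work(); // run outside the lock, like ExecuteAll(current_cmdbuf)
            }
        }

        std::queue<std::function<void()>> queue;
        std::mutex mutex;
        std::condition_variable cv;
        bool quit = false;
        std::thread worker; // declared last: starts after the state above exists
    };

As in VKScheduler's destructor, quit is flipped under the same mutex the worker waits on, so the shutdown notification cannot be lost between the predicate check and the wait.
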
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 3ce48e9d2..cf39a2363 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -8,12 +8,12 @@
#include <condition_variable>
#include <cstddef>
#include <memory>
-#include <stack>
#include <thread>
#include <utility>
+#include <queue>
+
#include "common/alignment.h"
#include "common/common_types.h"
-#include "common/threadsafe_queue.h"
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -22,6 +22,7 @@ namespace Vulkan {
class CommandPool;
class Device;
class Framebuffer;
+class GraphicsPipeline;
class StateTracker;
class VKQueryCache;
@@ -52,8 +53,8 @@ public:
/// of a renderpass.
void RequestOutsideRenderPassOperationContext();
- /// Binds a pipeline to the current execution context.
- void BindGraphicsPipeline(VkPipeline pipeline);
+    /// Updates the graphics pipeline to the current execution context; returns true if it changed.
+ bool UpdateGraphicsPipeline(GraphicsPipeline* pipeline);
/// Invalidates current command buffer state except for render passes
void InvalidateState();
@@ -85,6 +86,10 @@ public:
/// Waits for the given tick to trigger on the GPU.
void Wait(u64 tick) {
+ if (tick >= master_semaphore->CurrentTick()) {
+            // Make sure we are not waiting on a tick that has not been submitted; it would never be signalled
+ Flush();
+ }
master_semaphore->Wait(tick);
}
@@ -154,15 +159,24 @@ private:
return true;
}
+ void MarkSubmit() {
+ submit = true;
+ }
+
bool Empty() const {
return command_offset == 0;
}
+ bool HasSubmit() const {
+ return submit;
+ }
+
private:
Command* first = nullptr;
Command* last = nullptr;
size_t command_offset = 0;
+ bool submit = false;
alignas(std::max_align_t) std::array<u8, 0x8000> data{};
};
@@ -170,11 +184,13 @@ private:
VkRenderPass renderpass = nullptr;
VkFramebuffer framebuffer = nullptr;
VkExtent2D render_area = {0, 0};
- VkPipeline graphics_pipeline = nullptr;
+ GraphicsPipeline* graphics_pipeline = nullptr;
};
void WorkerThread();
+ void AllocateWorkerCommandBuffer();
+
void SubmitExecution(VkSemaphore semaphore);
void AllocateNewContext();
@@ -204,11 +220,13 @@ private:
std::array<VkImage, 9> renderpass_images{};
std::array<VkImageSubresourceRange, 9> renderpass_image_ranges{};
- Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
- Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
- std::mutex mutex;
- std::condition_variable cv;
- bool quit = false;
+ std::queue<std::unique_ptr<CommandChunk>> work_queue;
+ std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
+ std::mutex reserve_mutex;
+ std::mutex work_mutex;
+ std::condition_variable work_cv;
+ std::condition_variable wait_cv;
+ std::atomic_bool quit{};
};
} // namespace Vulkan
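
Aside (not part of the patch): the new guard in Wait() exists because a tick's signal is only enqueued when the batch containing it is submitted; waiting on the current tick without flushing would block forever. On the Vulkan side, the blocking wait itself reduces to vkWaitSemaphores on the timeline semaphore (core in Vulkan 1.2). A sketch with a hypothetical helper:

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // Hypothetical free function approximating what MasterSemaphore::Wait(tick)
    // comes down to for a timeline semaphore.
    VkResult WaitForTick(VkDevice device, VkSemaphore timeline, uint64_t tick) {
        const VkSemaphoreWaitInfo wait_info{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
            .pNext = nullptr,
            .flags = 0,
            .semaphoreCount = 1,
            .pSemaphores = &timeline,
            .pValues = &tick,
        };
        // Blocks until the semaphore's counter reaches 'tick'.
        return vkWaitSemaphores(device, &wait_info, UINT64_MAX);
    }
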
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
deleted file mode 100644
index c6846d886..000000000
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ /dev/null
@@ -1,3166 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <functional>
-#include <limits>
-#include <map>
-#include <optional>
-#include <type_traits>
-#include <unordered_map>
-#include <utility>
-
-#include <fmt/format.h>
-
-#include <sirit/sirit.h>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "common/logging/log.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/engines/shader_bytecode.h"
-#include "video_core/engines/shader_header.h"
-#include "video_core/engines/shader_type.h"
-#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
-#include "video_core/shader/node.h"
-#include "video_core/shader/shader_ir.h"
-#include "video_core/shader/transform_feedback.h"
-#include "video_core/vulkan_common/vulkan_device.h"
-
-namespace Vulkan {
-
-namespace {
-
-using Sirit::Id;
-using Tegra::Engines::ShaderType;
-using Tegra::Shader::Attribute;
-using Tegra::Shader::PixelImap;
-using Tegra::Shader::Register;
-using namespace VideoCommon::Shader;
-
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using Operation = const OperationNode&;
-
-class ASTDecompiler;
-class ExprDecompiler;
-
-// TODO(Rodrigo): Use rasterizer's value
-constexpr u32 MaxConstBufferFloats = 0x4000;
-constexpr u32 MaxConstBufferElements = MaxConstBufferFloats / 4;
-
-constexpr u32 NumInputPatches = 32; // This value seems to be the standard
-
-enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat };
-
-class Expression final {
-public:
- Expression(Id id_, Type type_) : id{id_}, type{type_} {
- ASSERT(type_ != Type::Void);
- }
- Expression() : type{Type::Void} {}
-
- Id id{};
- Type type{};
-};
-static_assert(std::is_standard_layout_v<Expression>);
-
-struct TexelBuffer {
- Id image_type{};
- Id image{};
-};
-
-struct SampledImage {
- Id image_type{};
- Id sampler_type{};
- Id sampler_pointer_type{};
- Id variable{};
-};
-
-struct StorageImage {
- Id image_type{};
- Id image{};
-};
-
-struct AttributeType {
- Type type;
- Id scalar;
- Id vector;
-};
-
-struct VertexIndices {
- std::optional<u32> position;
- std::optional<u32> layer;
- std::optional<u32> viewport;
- std::optional<u32> point_size;
- std::optional<u32> clip_distances;
-};
-
-struct GenericVaryingDescription {
- Id id = nullptr;
- u32 first_element = 0;
- bool is_scalar = false;
-};
-
-spv::Dim GetSamplerDim(const SamplerEntry& sampler) {
- ASSERT(!sampler.is_buffer);
- switch (sampler.type) {
- case Tegra::Shader::TextureType::Texture1D:
- return spv::Dim::Dim1D;
- case Tegra::Shader::TextureType::Texture2D:
- return spv::Dim::Dim2D;
- case Tegra::Shader::TextureType::Texture3D:
- return spv::Dim::Dim3D;
- case Tegra::Shader::TextureType::TextureCube:
- return spv::Dim::Cube;
- default:
- UNIMPLEMENTED_MSG("Unimplemented sampler type={}", sampler.type);
- return spv::Dim::Dim2D;
- }
-}
-
-std::pair<spv::Dim, bool> GetImageDim(const ImageEntry& image) {
- switch (image.type) {
- case Tegra::Shader::ImageType::Texture1D:
- return {spv::Dim::Dim1D, false};
- case Tegra::Shader::ImageType::TextureBuffer:
- return {spv::Dim::Buffer, false};
- case Tegra::Shader::ImageType::Texture1DArray:
- return {spv::Dim::Dim1D, true};
- case Tegra::Shader::ImageType::Texture2D:
- return {spv::Dim::Dim2D, false};
- case Tegra::Shader::ImageType::Texture2DArray:
- return {spv::Dim::Dim2D, true};
- case Tegra::Shader::ImageType::Texture3D:
- return {spv::Dim::Dim3D, false};
- default:
- UNIMPLEMENTED_MSG("Unimplemented image type={}", image.type);
- return {spv::Dim::Dim2D, false};
- }
-}
-
-/// Returns the number of vertices present in a primitive topology.
-u32 GetNumPrimitiveTopologyVertices(Maxwell::PrimitiveTopology primitive_topology) {
- switch (primitive_topology) {
- case Maxwell::PrimitiveTopology::Points:
- return 1;
- case Maxwell::PrimitiveTopology::Lines:
- case Maxwell::PrimitiveTopology::LineLoop:
- case Maxwell::PrimitiveTopology::LineStrip:
- return 2;
- case Maxwell::PrimitiveTopology::Triangles:
- case Maxwell::PrimitiveTopology::TriangleStrip:
- case Maxwell::PrimitiveTopology::TriangleFan:
- return 3;
- case Maxwell::PrimitiveTopology::LinesAdjacency:
- case Maxwell::PrimitiveTopology::LineStripAdjacency:
- return 4;
- case Maxwell::PrimitiveTopology::TrianglesAdjacency:
- case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
- return 6;
- case Maxwell::PrimitiveTopology::Quads:
- UNIMPLEMENTED_MSG("Quads");
- return 3;
- case Maxwell::PrimitiveTopology::QuadStrip:
- UNIMPLEMENTED_MSG("QuadStrip");
- return 3;
- case Maxwell::PrimitiveTopology::Polygon:
- UNIMPLEMENTED_MSG("Polygon");
- return 3;
- case Maxwell::PrimitiveTopology::Patches:
- UNIMPLEMENTED_MSG("Patches");
- return 3;
- default:
- UNREACHABLE();
- return 3;
- }
-}
-
-spv::ExecutionMode GetExecutionMode(Maxwell::TessellationPrimitive primitive) {
- switch (primitive) {
- case Maxwell::TessellationPrimitive::Isolines:
- return spv::ExecutionMode::Isolines;
- case Maxwell::TessellationPrimitive::Triangles:
- return spv::ExecutionMode::Triangles;
- case Maxwell::TessellationPrimitive::Quads:
- return spv::ExecutionMode::Quads;
- }
- UNREACHABLE();
- return spv::ExecutionMode::Triangles;
-}
-
-spv::ExecutionMode GetExecutionMode(Maxwell::TessellationSpacing spacing) {
- switch (spacing) {
- case Maxwell::TessellationSpacing::Equal:
- return spv::ExecutionMode::SpacingEqual;
- case Maxwell::TessellationSpacing::FractionalOdd:
- return spv::ExecutionMode::SpacingFractionalOdd;
- case Maxwell::TessellationSpacing::FractionalEven:
- return spv::ExecutionMode::SpacingFractionalEven;
- }
- UNREACHABLE();
- return spv::ExecutionMode::SpacingEqual;
-}
-
-spv::ExecutionMode GetExecutionMode(Maxwell::PrimitiveTopology input_topology) {
- switch (input_topology) {
- case Maxwell::PrimitiveTopology::Points:
- return spv::ExecutionMode::InputPoints;
- case Maxwell::PrimitiveTopology::Lines:
- case Maxwell::PrimitiveTopology::LineLoop:
- case Maxwell::PrimitiveTopology::LineStrip:
- return spv::ExecutionMode::InputLines;
- case Maxwell::PrimitiveTopology::Triangles:
- case Maxwell::PrimitiveTopology::TriangleStrip:
- case Maxwell::PrimitiveTopology::TriangleFan:
- return spv::ExecutionMode::Triangles;
- case Maxwell::PrimitiveTopology::LinesAdjacency:
- case Maxwell::PrimitiveTopology::LineStripAdjacency:
- return spv::ExecutionMode::InputLinesAdjacency;
- case Maxwell::PrimitiveTopology::TrianglesAdjacency:
- case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
- return spv::ExecutionMode::InputTrianglesAdjacency;
- case Maxwell::PrimitiveTopology::Quads:
- UNIMPLEMENTED_MSG("Quads");
- return spv::ExecutionMode::Triangles;
- case Maxwell::PrimitiveTopology::QuadStrip:
- UNIMPLEMENTED_MSG("QuadStrip");
- return spv::ExecutionMode::Triangles;
- case Maxwell::PrimitiveTopology::Polygon:
- UNIMPLEMENTED_MSG("Polygon");
- return spv::ExecutionMode::Triangles;
- case Maxwell::PrimitiveTopology::Patches:
- UNIMPLEMENTED_MSG("Patches");
- return spv::ExecutionMode::Triangles;
- }
- UNREACHABLE();
- return spv::ExecutionMode::Triangles;
-}
-
-spv::ExecutionMode GetExecutionMode(Tegra::Shader::OutputTopology output_topology) {
- switch (output_topology) {
- case Tegra::Shader::OutputTopology::PointList:
- return spv::ExecutionMode::OutputPoints;
- case Tegra::Shader::OutputTopology::LineStrip:
- return spv::ExecutionMode::OutputLineStrip;
- case Tegra::Shader::OutputTopology::TriangleStrip:
- return spv::ExecutionMode::OutputTriangleStrip;
- default:
- UNREACHABLE();
- return spv::ExecutionMode::OutputPoints;
- }
-}
-
-/// Returns true if an attribute index is one of the 32 generic attributes
-constexpr bool IsGenericAttribute(Attribute::Index attribute) {
- return attribute >= Attribute::Index::Attribute_0 &&
- attribute <= Attribute::Index::Attribute_31;
-}
-
-/// Returns the location of a generic attribute
-u32 GetGenericAttributeLocation(Attribute::Index attribute) {
- ASSERT(IsGenericAttribute(attribute));
- return static_cast<u32>(attribute) - static_cast<u32>(Attribute::Index::Attribute_0);
-}
-
-/// Returns true if an object has to be treated as precise
-bool IsPrecise(Operation operand) {
- const auto& meta{operand.GetMeta()};
- if (std::holds_alternative<MetaArithmetic>(meta)) {
- return std::get<MetaArithmetic>(meta).precise;
- }
- return false;
-}
-
-class SPIRVDecompiler final : public Sirit::Module {
-public:
- explicit SPIRVDecompiler(const Device& device_, const ShaderIR& ir_, ShaderType stage_,
- const Registry& registry_, const Specialization& specialization_)
- : Module(0x00010300), device{device_}, ir{ir_}, stage{stage_}, header{ir_.GetHeader()},
- registry{registry_}, specialization{specialization_} {
- if (stage_ != ShaderType::Compute) {
- transform_feedback = BuildTransformFeedback(registry_.GetGraphicsInfo());
- }
-
- AddCapability(spv::Capability::Shader);
- AddCapability(spv::Capability::UniformAndStorageBuffer16BitAccess);
- AddCapability(spv::Capability::ImageQuery);
- AddCapability(spv::Capability::Image1D);
- AddCapability(spv::Capability::ImageBuffer);
- AddCapability(spv::Capability::ImageGatherExtended);
- AddCapability(spv::Capability::SampledBuffer);
- AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
- AddCapability(spv::Capability::DrawParameters);
- AddCapability(spv::Capability::SubgroupBallotKHR);
- AddCapability(spv::Capability::SubgroupVoteKHR);
- AddExtension("SPV_KHR_16bit_storage");
- AddExtension("SPV_KHR_shader_ballot");
- AddExtension("SPV_KHR_subgroup_vote");
- AddExtension("SPV_KHR_storage_buffer_storage_class");
- AddExtension("SPV_KHR_variable_pointers");
- AddExtension("SPV_KHR_shader_draw_parameters");
-
- if (!transform_feedback.empty()) {
- if (device.IsExtTransformFeedbackSupported()) {
- AddCapability(spv::Capability::TransformFeedback);
- } else {
- LOG_ERROR(Render_Vulkan, "Shader requires transform feedbacks but these are not "
- "supported on this device");
- }
- }
- if (ir.UsesLayer() || ir.UsesViewportIndex()) {
- if (ir.UsesViewportIndex()) {
- AddCapability(spv::Capability::MultiViewport);
- }
- if (stage != ShaderType::Geometry && device.IsExtShaderViewportIndexLayerSupported()) {
- AddExtension("SPV_EXT_shader_viewport_index_layer");
- AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
- }
- }
- if (device.IsFormatlessImageLoadSupported()) {
- AddCapability(spv::Capability::StorageImageReadWithoutFormat);
- }
- if (device.IsFloat16Supported()) {
- AddCapability(spv::Capability::Float16);
- }
- t_scalar_half = Name(TypeFloat(device_.IsFloat16Supported() ? 16 : 32), "scalar_half");
- t_half = Name(TypeVector(t_scalar_half, 2), "half");
-
- const Id main = Decompile();
-
- switch (stage) {
- case ShaderType::Vertex:
- AddEntryPoint(spv::ExecutionModel::Vertex, main, "main", interfaces);
- break;
- case ShaderType::TesselationControl:
- AddCapability(spv::Capability::Tessellation);
- AddEntryPoint(spv::ExecutionModel::TessellationControl, main, "main", interfaces);
- AddExecutionMode(main, spv::ExecutionMode::OutputVertices,
- header.common2.threads_per_input_primitive);
- break;
- case ShaderType::TesselationEval: {
- const auto& info = registry.GetGraphicsInfo();
- AddCapability(spv::Capability::Tessellation);
- AddEntryPoint(spv::ExecutionModel::TessellationEvaluation, main, "main", interfaces);
- AddExecutionMode(main, GetExecutionMode(info.tessellation_primitive));
- AddExecutionMode(main, GetExecutionMode(info.tessellation_spacing));
- AddExecutionMode(main, info.tessellation_clockwise
- ? spv::ExecutionMode::VertexOrderCw
- : spv::ExecutionMode::VertexOrderCcw);
- break;
- }
- case ShaderType::Geometry: {
- const auto& info = registry.GetGraphicsInfo();
- AddCapability(spv::Capability::Geometry);
- AddEntryPoint(spv::ExecutionModel::Geometry, main, "main", interfaces);
- AddExecutionMode(main, GetExecutionMode(info.primitive_topology));
- AddExecutionMode(main, GetExecutionMode(header.common3.output_topology));
- AddExecutionMode(main, spv::ExecutionMode::OutputVertices,
- header.common4.max_output_vertices);
- // TODO(Rodrigo): Where can we get this info from?
- AddExecutionMode(main, spv::ExecutionMode::Invocations, 1U);
- break;
- }
- case ShaderType::Fragment:
- AddEntryPoint(spv::ExecutionModel::Fragment, main, "main", interfaces);
- AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
- if (header.ps.omap.depth) {
- AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
- }
- if (specialization.early_fragment_tests) {
- AddExecutionMode(main, spv::ExecutionMode::EarlyFragmentTests);
- }
- break;
- case ShaderType::Compute:
- const auto workgroup_size = specialization.workgroup_size;
- AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
- workgroup_size[1], workgroup_size[2]);
- AddEntryPoint(spv::ExecutionModel::GLCompute, main, "main", interfaces);
- break;
- }
- }
-
-private:
- Id Decompile() {
- DeclareCommon();
- DeclareVertex();
- DeclareTessControl();
- DeclareTessEval();
- DeclareGeometry();
- DeclareFragment();
- DeclareCompute();
- DeclareRegisters();
- DeclareCustomVariables();
- DeclarePredicates();
- DeclareLocalMemory();
- DeclareSharedMemory();
- DeclareInternalFlags();
- DeclareInputAttributes();
- DeclareOutputAttributes();
-
- u32 binding = specialization.base_binding;
- binding = DeclareConstantBuffers(binding);
- binding = DeclareGlobalBuffers(binding);
- binding = DeclareUniformTexels(binding);
- binding = DeclareSamplers(binding);
- binding = DeclareStorageTexels(binding);
- binding = DeclareImages(binding);
-
- const Id main = OpFunction(t_void, {}, TypeFunction(t_void));
- AddLabel();
-
- if (ir.IsDecompiled()) {
- DeclareFlowVariables();
- DecompileAST();
- } else {
- AllocateLabels();
- DecompileBranchMode();
- }
-
- OpReturn();
- OpFunctionEnd();
-
- return main;
- }
-
- void DefinePrologue() {
- if (stage == ShaderType::Vertex) {
- // Clear Position to avoid reading trash on the Z conversion.
- const auto position_index = out_indices.position.value();
- const Id position = AccessElement(t_out_float4, out_vertex, position_index);
- OpStore(position, v_varying_default);
-
- if (specialization.point_size) {
- const u32 point_size_index = out_indices.point_size.value();
- const Id out_point_size = AccessElement(t_out_float, out_vertex, point_size_index);
- OpStore(out_point_size, Constant(t_float, *specialization.point_size));
- }
- }
- }
-
- void DecompileAST();
-
- void DecompileBranchMode() {
- const u32 first_address = ir.GetBasicBlocks().begin()->first;
- const Id loop_label = OpLabel("loop");
- const Id merge_label = OpLabel("merge");
- const Id dummy_label = OpLabel();
- const Id jump_label = OpLabel();
- continue_label = OpLabel("continue");
-
- std::vector<Sirit::Literal> literals;
- std::vector<Id> branch_labels;
- for (const auto& [literal, label] : labels) {
- literals.push_back(literal);
- branch_labels.push_back(label);
- }
-
- jmp_to = OpVariable(TypePointer(spv::StorageClass::Function, t_uint),
- spv::StorageClass::Function, Constant(t_uint, first_address));
- AddLocalVariable(jmp_to);
-
- std::tie(ssy_flow_stack, ssy_flow_stack_top) = CreateFlowStack();
- std::tie(pbk_flow_stack, pbk_flow_stack_top) = CreateFlowStack();
-
- Name(jmp_to, "jmp_to");
- Name(ssy_flow_stack, "ssy_flow_stack");
- Name(ssy_flow_stack_top, "ssy_flow_stack_top");
- Name(pbk_flow_stack, "pbk_flow_stack");
- Name(pbk_flow_stack_top, "pbk_flow_stack_top");
-
- DefinePrologue();
-
- OpBranch(loop_label);
- AddLabel(loop_label);
- OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone);
- OpBranch(dummy_label);
-
- AddLabel(dummy_label);
- const Id default_branch = OpLabel();
- const Id jmp_to_load = OpLoad(t_uint, jmp_to);
- OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone);
- OpSwitch(jmp_to_load, default_branch, literals, branch_labels);
-
- AddLabel(default_branch);
- OpReturn();
-
- for (const auto& [address, bb] : ir.GetBasicBlocks()) {
- AddLabel(labels.at(address));
-
- VisitBasicBlock(bb);
-
- const auto next_it = labels.lower_bound(address + 1);
- const Id next_label = next_it != labels.end() ? next_it->second : default_branch;
- OpBranch(next_label);
- }
-
- AddLabel(jump_label);
- OpBranch(continue_label);
- AddLabel(continue_label);
- OpBranch(loop_label);
- AddLabel(merge_label);
- }
-
-private:
- friend class ASTDecompiler;
- friend class ExprDecompiler;
-
- static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount);
-
- void AllocateLabels() {
- for (const auto& pair : ir.GetBasicBlocks()) {
- const u32 address = pair.first;
- labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address)));
- }
- }
-
- void DeclareCommon() {
- thread_id =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupLocalInvocationId, t_in_uint, "thread_id");
- thread_masks[0] =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupEqMask, t_in_uint4, "thread_eq_mask");
- thread_masks[1] =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupGeMask, t_in_uint4, "thread_ge_mask");
- thread_masks[2] =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupGtMask, t_in_uint4, "thread_gt_mask");
- thread_masks[3] =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupLeMask, t_in_uint4, "thread_le_mask");
- thread_masks[4] =
- DeclareInputBuiltIn(spv::BuiltIn::SubgroupLtMask, t_in_uint4, "thread_lt_mask");
- }
-
- void DeclareVertex() {
- if (stage != ShaderType::Vertex) {
- return;
- }
- Id out_vertex_struct;
- std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct();
- const Id vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct);
- out_vertex = OpVariable(vertex_ptr, spv::StorageClass::Output);
- interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex")));
-
- // Declare input attributes
- vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_int, "vertex_index");
- instance_index =
- DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_int, "instance_index");
- base_vertex = DeclareInputBuiltIn(spv::BuiltIn::BaseVertex, t_in_int, "base_vertex");
- base_instance = DeclareInputBuiltIn(spv::BuiltIn::BaseInstance, t_in_int, "base_instance");
- }
-
- void DeclareTessControl() {
- if (stage != ShaderType::TesselationControl) {
- return;
- }
- DeclareInputVertexArray(NumInputPatches);
- DeclareOutputVertexArray(header.common2.threads_per_input_primitive);
-
- tess_level_outer = DeclareBuiltIn(
- spv::BuiltIn::TessLevelOuter, spv::StorageClass::Output,
- TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 4U))),
- "tess_level_outer");
- Decorate(tess_level_outer, spv::Decoration::Patch);
-
- tess_level_inner = DeclareBuiltIn(
- spv::BuiltIn::TessLevelInner, spv::StorageClass::Output,
- TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 2U))),
- "tess_level_inner");
- Decorate(tess_level_inner, spv::Decoration::Patch);
-
- invocation_id = DeclareInputBuiltIn(spv::BuiltIn::InvocationId, t_in_int, "invocation_id");
- }
-
- void DeclareTessEval() {
- if (stage != ShaderType::TesselationEval) {
- return;
- }
- DeclareInputVertexArray(NumInputPatches);
- DeclareOutputVertex();
-
- tess_coord = DeclareInputBuiltIn(spv::BuiltIn::TessCoord, t_in_float3, "tess_coord");
- }
-
- void DeclareGeometry() {
- if (stage != ShaderType::Geometry) {
- return;
- }
- const auto& info = registry.GetGraphicsInfo();
- const u32 num_input = GetNumPrimitiveTopologyVertices(info.primitive_topology);
- DeclareInputVertexArray(num_input);
- DeclareOutputVertex();
- }
-
- void DeclareFragment() {
- if (stage != ShaderType::Fragment) {
- return;
- }
-
- for (u32 rt = 0; rt < static_cast<u32>(std::size(frag_colors)); ++rt) {
- if (!IsRenderTargetEnabled(rt)) {
- continue;
- }
- const Id id = AddGlobalVariable(OpVariable(t_out_float4, spv::StorageClass::Output));
- Name(id, fmt::format("frag_color{}", rt));
- Decorate(id, spv::Decoration::Location, rt);
-
- frag_colors[rt] = id;
- interfaces.push_back(id);
- }
-
- if (header.ps.omap.depth) {
- frag_depth = AddGlobalVariable(OpVariable(t_out_float, spv::StorageClass::Output));
- Name(frag_depth, "frag_depth");
- Decorate(frag_depth, spv::Decoration::BuiltIn,
- static_cast<u32>(spv::BuiltIn::FragDepth));
-
- interfaces.push_back(frag_depth);
- }
-
- frag_coord = DeclareInputBuiltIn(spv::BuiltIn::FragCoord, t_in_float4, "frag_coord");
- front_facing = DeclareInputBuiltIn(spv::BuiltIn::FrontFacing, t_in_bool, "front_facing");
- point_coord = DeclareInputBuiltIn(spv::BuiltIn::PointCoord, t_in_float2, "point_coord");
- }
-
- void DeclareCompute() {
- if (stage != ShaderType::Compute) {
- return;
- }
-
- workgroup_id = DeclareInputBuiltIn(spv::BuiltIn::WorkgroupId, t_in_uint3, "workgroup_id");
- local_invocation_id =
- DeclareInputBuiltIn(spv::BuiltIn::LocalInvocationId, t_in_uint3, "local_invocation_id");
- }
-
- void DeclareRegisters() {
- for (const u32 gpr : ir.GetRegisters()) {
- const Id id = OpVariable(t_prv_float, spv::StorageClass::Private, v_float_zero);
- Name(id, fmt::format("gpr_{}", gpr));
- registers.emplace(gpr, AddGlobalVariable(id));
- }
- }
-
- void DeclareCustomVariables() {
- const u32 num_custom_variables = ir.GetNumCustomVariables();
- for (u32 i = 0; i < num_custom_variables; ++i) {
- const Id id = OpVariable(t_prv_float, spv::StorageClass::Private, v_float_zero);
- Name(id, fmt::format("custom_var_{}", i));
- custom_variables.emplace(i, AddGlobalVariable(id));
- }
- }
-
- void DeclarePredicates() {
- for (const auto pred : ir.GetPredicates()) {
- const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false);
- Name(id, fmt::format("pred_{}", static_cast<u32>(pred)));
- predicates.emplace(pred, AddGlobalVariable(id));
- }
- }
-
- void DeclareFlowVariables() {
- for (u32 i = 0; i < ir.GetASTNumVariables(); i++) {
- const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false);
- Name(id, fmt::format("flow_var_{}", static_cast<u32>(i)));
- flow_variables.emplace(i, AddGlobalVariable(id));
- }
- }
-
- void DeclareLocalMemory() {
- // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at
- // specialization time.
- const u64 lmem_size = stage == ShaderType::Compute ? 0x400 : header.GetLocalMemorySize();
- if (lmem_size == 0) {
- return;
- }
- const auto element_count = static_cast<u32>(Common::AlignUp(lmem_size, 4) / 4);
- const Id type_array = TypeArray(t_float, Constant(t_uint, element_count));
- const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array);
- Name(type_pointer, "LocalMemory");
-
- local_memory =
- OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array));
- AddGlobalVariable(Name(local_memory, "local_memory"));
- }
-
- void DeclareSharedMemory() {
- if (stage != ShaderType::Compute) {
- return;
- }
- t_smem_uint = TypePointer(spv::StorageClass::Workgroup, t_uint);
-
- u32 smem_size = specialization.shared_memory_size * 4;
- if (smem_size == 0) {
- // Avoid declaring an empty array.
- return;
- }
- const u32 limit = device.GetMaxComputeSharedMemorySize();
- if (smem_size > limit) {
- LOG_ERROR(Render_Vulkan, "Shared memory size {} is clamped to host's limit {}",
- smem_size, limit);
- smem_size = limit;
- }
-
- const Id type_array = TypeArray(t_uint, Constant(t_uint, smem_size / 4));
- const Id type_pointer = TypePointer(spv::StorageClass::Workgroup, type_array);
- Name(type_pointer, "SharedMemory");
-
- shared_memory = OpVariable(type_pointer, spv::StorageClass::Workgroup);
- AddGlobalVariable(Name(shared_memory, "shared_memory"));
- }
-
- void DeclareInternalFlags() {
- static constexpr std::array names{"zero", "sign", "carry", "overflow"};
-
- for (std::size_t flag = 0; flag < INTERNAL_FLAGS_COUNT; ++flag) {
- const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false);
- internal_flags[flag] = AddGlobalVariable(Name(id, names[flag]));
- }
- }
-
- void DeclareInputVertexArray(u32 length) {
- constexpr auto storage = spv::StorageClass::Input;
- std::tie(in_indices, in_vertex) = DeclareVertexArray(storage, "in_indices", length);
- }
-
- void DeclareOutputVertexArray(u32 length) {
- constexpr auto storage = spv::StorageClass::Output;
- std::tie(out_indices, out_vertex) = DeclareVertexArray(storage, "out_indices", length);
- }
-
- std::tuple<VertexIndices, Id> DeclareVertexArray(spv::StorageClass storage_class,
- std::string name, u32 length) {
- const auto [struct_id, indices] = DeclareVertexStruct();
- const Id vertex_array = TypeArray(struct_id, Constant(t_uint, length));
- const Id vertex_ptr = TypePointer(storage_class, vertex_array);
- const Id vertex = OpVariable(vertex_ptr, storage_class);
- AddGlobalVariable(Name(vertex, std::move(name)));
- interfaces.push_back(vertex);
- return {indices, vertex};
- }
-
- void DeclareOutputVertex() {
- Id out_vertex_struct;
- std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct();
- const Id out_vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct);
- out_vertex = OpVariable(out_vertex_ptr, spv::StorageClass::Output);
- interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex")));
- }
-
- void DeclareInputAttributes() {
- for (const auto index : ir.GetInputAttributes()) {
- if (!IsGenericAttribute(index)) {
- continue;
- }
- const u32 location = GetGenericAttributeLocation(index);
- if (!IsAttributeEnabled(location)) {
- continue;
- }
- const auto type_descriptor = GetAttributeType(location);
- Id type;
- if (IsInputAttributeArray()) {
- type = GetTypeVectorDefinitionLut(type_descriptor.type).at(3);
- type = TypeArray(type, Constant(t_uint, GetNumInputVertices()));
- type = TypePointer(spv::StorageClass::Input, type);
- } else {
- type = type_descriptor.vector;
- }
- const Id id = OpVariable(type, spv::StorageClass::Input);
- AddGlobalVariable(Name(id, fmt::format("in_attr{}", location)));
- input_attributes.emplace(index, id);
- interfaces.push_back(id);
-
- Decorate(id, spv::Decoration::Location, location);
-
- if (stage != ShaderType::Fragment) {
- continue;
- }
- switch (header.ps.GetPixelImap(location)) {
- case PixelImap::Constant:
- Decorate(id, spv::Decoration::Flat);
- break;
- case PixelImap::Perspective:
- // Default
- break;
- case PixelImap::ScreenLinear:
- Decorate(id, spv::Decoration::NoPerspective);
- break;
- default:
- UNREACHABLE_MSG("Unused attribute being fetched");
- }
- }
- }
-
- void DeclareOutputAttributes() {
- if (stage == ShaderType::Compute || stage == ShaderType::Fragment) {
- return;
- }
-
- UNIMPLEMENTED_IF(registry.GetGraphicsInfo().tfb_enabled && stage != ShaderType::Vertex);
- for (const auto index : ir.GetOutputAttributes()) {
- if (!IsGenericAttribute(index)) {
- continue;
- }
- DeclareOutputAttribute(index);
- }
- }
-
- void DeclareOutputAttribute(Attribute::Index index) {
- static constexpr std::string_view swizzle = "xyzw";
-
- const u32 location = GetGenericAttributeLocation(index);
- u8 element = 0;
- while (element < 4) {
- const std::size_t remainder = 4 - element;
-
- std::size_t num_components = remainder;
- const std::optional tfb = GetTransformFeedbackInfo(index, element);
- if (tfb) {
- num_components = tfb->components;
- }
-
- Id type = GetTypeVectorDefinitionLut(Type::Float).at(num_components - 1);
- Id varying_default = v_varying_default;
- if (IsOutputAttributeArray()) {
- const u32 num = GetNumOutputVertices();
- type = TypeArray(type, Constant(t_uint, num));
- if (device.GetDriverID() != VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
- // Intel's proprietary driver fails to setup defaults for arrayed output
- // attributes.
- varying_default = ConstantComposite(type, std::vector(num, varying_default));
- }
- }
- type = TypePointer(spv::StorageClass::Output, type);
-
- std::string name = fmt::format("out_attr{}", location);
- if (num_components < 4 || element > 0) {
- name = fmt::format("{}_{}", name, swizzle.substr(element, num_components));
- }
-
- const Id id = OpVariable(type, spv::StorageClass::Output, varying_default);
- Name(AddGlobalVariable(id), name);
-
- GenericVaryingDescription description;
- description.id = id;
- description.first_element = element;
- description.is_scalar = num_components == 1;
- for (u32 i = 0; i < num_components; ++i) {
- const u8 offset = static_cast<u8>(static_cast<u32>(index) * 4 + element + i);
- output_attributes.emplace(offset, description);
- }
- interfaces.push_back(id);
-
- Decorate(id, spv::Decoration::Location, location);
- if (element > 0) {
- Decorate(id, spv::Decoration::Component, static_cast<u32>(element));
- }
- if (tfb && device.IsExtTransformFeedbackSupported()) {
- Decorate(id, spv::Decoration::XfbBuffer, static_cast<u32>(tfb->buffer));
- Decorate(id, spv::Decoration::XfbStride, static_cast<u32>(tfb->stride));
- Decorate(id, spv::Decoration::Offset, static_cast<u32>(tfb->offset));
- }
-
- element = static_cast<u8>(static_cast<std::size_t>(element) + num_components);
- }
- }
-
- std::optional<VaryingTFB> GetTransformFeedbackInfo(Attribute::Index index, u8 element = 0) {
- const u8 location = static_cast<u8>(static_cast<u32>(index) * 4 + element);
- const auto it = transform_feedback.find(location);
- if (it == transform_feedback.end()) {
- return {};
- }
- return it->second;
- }
-
- u32 DeclareConstantBuffers(u32 binding) {
- for (const auto& [index, size] : ir.GetConstantBuffers()) {
- const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? t_cbuf_scalar_ubo
- : t_cbuf_std140_ubo;
- const Id id = OpVariable(type, spv::StorageClass::Uniform);
- AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index)));
-
- Decorate(id, spv::Decoration::Binding, binding++);
- Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- constant_buffers.emplace(index, id);
- }
- return binding;
- }
-
- u32 DeclareGlobalBuffers(u32 binding) {
- for (const auto& [base, usage] : ir.GetGlobalMemory()) {
- const Id id = OpVariable(t_gmem_ssbo, spv::StorageClass::StorageBuffer);
- AddGlobalVariable(
- Name(id, fmt::format("gmem_{}_{}", base.cbuf_index, base.cbuf_offset)));
-
- Decorate(id, spv::Decoration::Binding, binding++);
- Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- global_buffers.emplace(base, id);
- }
- return binding;
- }
-
- u32 DeclareUniformTexels(u32 binding) {
- for (const auto& sampler : ir.GetSamplers()) {
- if (!sampler.is_buffer) {
- continue;
- }
- ASSERT(!sampler.is_array);
- ASSERT(!sampler.is_shadow);
-
- constexpr auto dim = spv::Dim::Buffer;
- constexpr int depth = 0;
- constexpr int arrayed = 0;
- constexpr bool ms = false;
- constexpr int sampled = 1;
- constexpr auto format = spv::ImageFormat::Unknown;
- const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
- const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
- const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
- Decorate(id, spv::Decoration::Binding, binding++);
- Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
-
- uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
- }
- return binding;
- }
-
- u32 DeclareSamplers(u32 binding) {
- for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.is_buffer) {
- continue;
- }
- const auto dim = GetSamplerDim(sampler);
- const int depth = sampler.is_shadow ? 1 : 0;
- const int arrayed = sampler.is_array ? 1 : 0;
- constexpr bool ms = false;
- constexpr int sampled = 1;
- constexpr auto format = spv::ImageFormat::Unknown;
- const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
- const Id sampler_type = TypeSampledImage(image_type);
- const Id sampler_pointer_type =
- TypePointer(spv::StorageClass::UniformConstant, sampler_type);
- const Id type = sampler.is_indexed
- ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
- : sampler_type;
- const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
- const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
- Decorate(id, spv::Decoration::Binding, binding++);
- Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
-
- sampled_images.emplace(
- sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
- }
- return binding;
- }
-
- u32 DeclareStorageTexels(u32 binding) {
- for (const auto& image : ir.GetImages()) {
- if (image.type != Tegra::Shader::ImageType::TextureBuffer) {
- continue;
- }
- DeclareImage(image, binding);
- }
- return binding;
- }
-
- u32 DeclareImages(u32 binding) {
- for (const auto& image : ir.GetImages()) {
- if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
- continue;
- }
- DeclareImage(image, binding);
- }
- return binding;
- }
-
- void DeclareImage(const ImageEntry& image, u32& binding) {
- const auto [dim, arrayed] = GetImageDim(image);
- constexpr int depth = 0;
- constexpr bool ms = false;
- constexpr int sampled = 2; // This won't be accessed with a sampler
- const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
- const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
- const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
- const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
- AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
-
- Decorate(id, spv::Decoration::Binding, binding++);
- Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
- if (image.is_read && !image.is_written) {
- Decorate(id, spv::Decoration::NonWritable);
- } else if (image.is_written && !image.is_read) {
- Decorate(id, spv::Decoration::NonReadable);
- }
-
- images.emplace(image.index, StorageImage{image_type, id});
- }
-
- bool IsRenderTargetEnabled(u32 rt) const {
- for (u32 component = 0; component < 4; ++component) {
- if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
- return true;
- }
- }
- return false;
- }
-
- bool IsInputAttributeArray() const {
- return stage == ShaderType::TesselationControl || stage == ShaderType::TesselationEval ||
- stage == ShaderType::Geometry;
- }
-
- bool IsOutputAttributeArray() const {
- return stage == ShaderType::TesselationControl;
- }
-
- bool IsAttributeEnabled(u32 location) const {
- return stage != ShaderType::Vertex || specialization.enabled_attributes[location];
- }
-
- u32 GetNumInputVertices() const {
- switch (stage) {
- case ShaderType::Geometry:
- return GetNumPrimitiveTopologyVertices(registry.GetGraphicsInfo().primitive_topology);
- case ShaderType::TesselationControl:
- case ShaderType::TesselationEval:
- return NumInputPatches;
- default:
- UNREACHABLE();
- return 1;
- }
- }
-
- u32 GetNumOutputVertices() const {
- switch (stage) {
- case ShaderType::TesselationControl:
- return header.common2.threads_per_input_primitive;
- default:
- UNREACHABLE();
- return 1;
- }
- }
-
- std::tuple<Id, VertexIndices> DeclareVertexStruct() {
- struct BuiltIn {
- Id type;
- spv::BuiltIn builtin;
- const char* name;
- };
- std::vector<BuiltIn> members;
- members.reserve(4);
-
- const auto AddBuiltIn = [&](Id type, spv::BuiltIn builtin, const char* name) {
- const auto index = static_cast<u32>(members.size());
- members.push_back(BuiltIn{type, builtin, name});
- return index;
- };
-
- VertexIndices indices;
- indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position");
-
- if (ir.UsesLayer()) {
- if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
- indices.layer = AddBuiltIn(t_int, spv::BuiltIn::Layer, "layer");
- } else {
- LOG_ERROR(
- Render_Vulkan,
- "Shader requires Layer but it's not supported on this stage with this device.");
- }
- }
-
- if (ir.UsesViewportIndex()) {
- if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
- indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index");
- } else {
- LOG_ERROR(Render_Vulkan, "Shader requires ViewportIndex but it's not supported on "
- "this stage with this device.");
- }
- }
-
- if (ir.UsesPointSize() || specialization.point_size) {
- indices.point_size = AddBuiltIn(t_float, spv::BuiltIn::PointSize, "point_size");
- }
-
- const auto& ir_output_attributes = ir.GetOutputAttributes();
- const bool declare_clip_distances = std::any_of(
- ir_output_attributes.begin(), ir_output_attributes.end(), [](const auto& index) {
- return index == Attribute::Index::ClipDistances0123 ||
- index == Attribute::Index::ClipDistances4567;
- });
- if (declare_clip_distances) {
- indices.clip_distances = AddBuiltIn(TypeArray(t_float, Constant(t_uint, 8)),
- spv::BuiltIn::ClipDistance, "clip_distances");
- }
-
- std::vector<Id> member_types;
- member_types.reserve(members.size());
- for (std::size_t i = 0; i < members.size(); ++i) {
- member_types.push_back(members[i].type);
- }
- const Id per_vertex_struct = Name(TypeStruct(member_types), "PerVertex");
- Decorate(per_vertex_struct, spv::Decoration::Block);
-
- for (std::size_t index = 0; index < members.size(); ++index) {
- const auto& member = members[index];
- MemberName(per_vertex_struct, static_cast<u32>(index), member.name);
- MemberDecorate(per_vertex_struct, static_cast<u32>(index), spv::Decoration::BuiltIn,
- static_cast<u32>(member.builtin));
- }
-
- return {per_vertex_struct, indices};
- }
-
- void VisitBasicBlock(const NodeBlock& bb) {
- for (const auto& node : bb) {
- Visit(node);
- }
- }
-
- Expression Visit(const Node& node) {
- if (const auto operation = std::get_if<OperationNode>(&*node)) {
- if (const auto amend_index = operation->GetAmendIndex()) {
- [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type;
- ASSERT(type == Type::Void);
- }
- const auto operation_index = static_cast<std::size_t>(operation->GetCode());
- const auto decompiler = operation_decompilers[operation_index];
- if (decompiler == nullptr) {
- UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index);
- }
- return (this->*decompiler)(*operation);
- }
-
- if (const auto gpr = std::get_if<GprNode>(&*node)) {
- const u32 index = gpr->GetIndex();
- if (index == Register::ZeroIndex) {
- return {v_float_zero, Type::Float};
- }
- return {OpLoad(t_float, registers.at(index)), Type::Float};
- }
-
- if (const auto cv = std::get_if<CustomVarNode>(&*node)) {
- const u32 index = cv->GetIndex();
- return {OpLoad(t_float, custom_variables.at(index)), Type::Float};
- }
-
- if (const auto immediate = std::get_if<ImmediateNode>(&*node)) {
- return {Constant(t_uint, immediate->GetValue()), Type::Uint};
- }
-
- if (const auto predicate = std::get_if<PredicateNode>(&*node)) {
- const auto value = [&]() -> Id {
- switch (const auto index = predicate->GetIndex(); index) {
- case Tegra::Shader::Pred::UnusedIndex:
- return v_true;
- case Tegra::Shader::Pred::NeverExecute:
- return v_false;
- default:
- return OpLoad(t_bool, predicates.at(index));
- }
- }();
- if (predicate->IsNegated()) {
- return {OpLogicalNot(t_bool, value), Type::Bool};
- }
- return {value, Type::Bool};
- }
-
- if (const auto abuf = std::get_if<AbufNode>(&*node)) {
- const auto attribute = abuf->GetIndex();
- const u32 element = abuf->GetElement();
- const auto& buffer = abuf->GetBuffer();
-
- const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) {
- std::vector<Id> members;
- members.reserve(std::size(indices) + 1);
-
- if (buffer && IsInputAttributeArray()) {
- members.push_back(AsUint(Visit(buffer)));
- }
- for (const u32 index : indices) {
- members.push_back(Constant(t_uint, index));
- }
- return OpAccessChain(pointer_type, composite, members);
- };
-
- switch (attribute) {
- case Attribute::Index::Position: {
- if (stage == ShaderType::Fragment) {
- return {OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)),
- Type::Float};
- }
- const std::vector elements = {in_indices.position.value(), element};
- return {OpLoad(t_float, ArrayPass(t_in_float, in_vertex, elements)), Type::Float};
- }
- case Attribute::Index::PointCoord: {
- switch (element) {
- case 0:
- case 1:
- return {OpCompositeExtract(t_float, OpLoad(t_float2, point_coord), element),
- Type::Float};
- }
- UNIMPLEMENTED_MSG("Unimplemented point coord element={}", element);
- return {v_float_zero, Type::Float};
- }
- case Attribute::Index::TessCoordInstanceIDVertexID:
- // TODO(Subv): Find out what the values are for the first two elements when inside a
- // vertex shader, and what's the value of the fourth element when inside a Tess Eval
- // shader.
- switch (element) {
- case 0:
- case 1:
- return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)),
- Type::Float};
- case 2:
- return {
- OpISub(t_int, OpLoad(t_int, instance_index), OpLoad(t_int, base_instance)),
- Type::Int};
- case 3:
- return {OpISub(t_int, OpLoad(t_int, vertex_index), OpLoad(t_int, base_vertex)),
- Type::Int};
- }
- UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element);
- return {Constant(t_uint, 0U), Type::Uint};
- case Attribute::Index::FrontFacing:
- // TODO(Subv): Find out what the values are for the other elements.
- ASSERT(stage == ShaderType::Fragment);
- if (element == 3) {
- const Id is_front_facing = OpLoad(t_bool, front_facing);
- const Id true_value = Constant(t_int, static_cast<s32>(-1));
- const Id false_value = Constant(t_int, 0);
- return {OpSelect(t_int, is_front_facing, true_value, false_value), Type::Int};
- }
- UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element);
- return {v_float_zero, Type::Float};
- default:
- if (!IsGenericAttribute(attribute)) {
- break;
- }
- const u32 location = GetGenericAttributeLocation(attribute);
- if (!IsAttributeEnabled(location)) {
- // Disabled attributes (also known as constant attributes) always return zero.
- return {v_float_zero, Type::Float};
- }
- const auto type_descriptor = GetAttributeType(location);
- const Type type = type_descriptor.type;
- const Id attribute_id = input_attributes.at(attribute);
- const std::vector elements = {element};
- const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements);
- return {OpLoad(GetTypeDefinition(type), pointer), type};
- }
- UNIMPLEMENTED_MSG("Unhandled input attribute: {}", attribute);
- return {v_float_zero, Type::Float};
- }
-
- if (const auto cbuf = std::get_if<CbufNode>(&*node)) {
- const Node& offset = cbuf->GetOffset();
- const Id buffer_id = constant_buffers.at(cbuf->GetIndex());
-
- Id pointer{};
- if (device.IsKhrUniformBufferStandardLayoutSupported()) {
- const Id buffer_offset =
- OpShiftRightLogical(t_uint, AsUint(Visit(offset)), Constant(t_uint, 2U));
- pointer =
- OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0U), buffer_offset);
- } else {
- Id buffer_index{};
- Id buffer_element{};
- if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) {
- // Direct access
- const u32 offset_imm = immediate->GetValue();
- ASSERT(offset_imm % 4 == 0);
- buffer_index = Constant(t_uint, offset_imm / 16);
- buffer_element = Constant(t_uint, (offset_imm / 4) % 4);
- } else if (std::holds_alternative<OperationNode>(*offset)) {
- // Indirect access
- const Id offset_id = AsUint(Visit(offset));
- const Id unsafe_offset = OpUDiv(t_uint, offset_id, Constant(t_uint, 4));
- const Id final_offset =
- OpUMod(t_uint, unsafe_offset, Constant(t_uint, MaxConstBufferElements - 1));
- buffer_index = OpUDiv(t_uint, final_offset, Constant(t_uint, 4));
- buffer_element = OpUMod(t_uint, final_offset, Constant(t_uint, 4));
- } else {
- UNREACHABLE_MSG("Unmanaged offset node type");
- }
- pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index,
- buffer_element);
- }
- return {OpLoad(t_float, pointer), Type::Float};
- }
-
- if (const auto gmem = std::get_if<GmemNode>(&*node)) {
- return {OpLoad(t_uint, GetGlobalMemoryPointer(*gmem)), Type::Uint};
- }
-
- if (const auto lmem = std::get_if<LmemNode>(&*node)) {
- Id address = AsUint(Visit(lmem->GetAddress()));
- address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U));
- const Id pointer = OpAccessChain(t_prv_float, local_memory, address);
- return {OpLoad(t_float, pointer), Type::Float};
- }
-
- if (const auto smem = std::get_if<SmemNode>(&*node)) {
- return {OpLoad(t_uint, GetSharedMemoryPointer(*smem)), Type::Uint};
- }
-
- if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) {
- const Id flag = internal_flags.at(static_cast<std::size_t>(internal_flag->GetFlag()));
- return {OpLoad(t_bool, flag), Type::Bool};
- }
-
- if (const auto conditional = std::get_if<ConditionalNode>(&*node)) {
- if (const auto amend_index = conditional->GetAmendIndex()) {
- [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type;
- ASSERT(type == Type::Void);
- }
- // It's invalid to call conditional on nested nodes, use an operation instead
- const Id true_label = OpLabel();
- const Id skip_label = OpLabel();
- const Id condition = AsBool(Visit(conditional->GetCondition()));
- OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone);
- OpBranchConditional(condition, true_label, skip_label);
- AddLabel(true_label);
-
- conditional_branch_set = true;
- inside_branch = false;
- VisitBasicBlock(conditional->GetCode());
- conditional_branch_set = false;
- if (!inside_branch) {
- OpBranch(skip_label);
- } else {
- inside_branch = false;
- }
- AddLabel(skip_label);
- return {};
- }
-
- if (const auto comment = std::get_if<CommentNode>(&*node)) {
- if (device.HasDebuggingToolAttached()) {
- // We should insert comments with OpString instead of using named variables
- Name(OpUndef(t_int), comment->GetText());
- }
- return {};
- }
-
- UNREACHABLE();
- return {};
- }
-
- template <Id (Module::*func)(Id, Id), Type result_type, Type type_a = result_type>
- Expression Unary(Operation operation) {
- const Id type_def = GetTypeDefinition(result_type);
- const Id op_a = As(Visit(operation[0]), type_a);
-
- const Id value = (this->*func)(type_def, op_a);
- if (IsPrecise(operation)) {
- Decorate(value, spv::Decoration::NoContraction);
- }
- return {value, result_type};
- }
-
- template <Id (Module::*func)(Id, Id, Id), Type result_type, Type type_a = result_type,
- Type type_b = type_a>
- Expression Binary(Operation operation) {
- const Id type_def = GetTypeDefinition(result_type);
- const Id op_a = As(Visit(operation[0]), type_a);
- const Id op_b = As(Visit(operation[1]), type_b);
-
- const Id value = (this->*func)(type_def, op_a, op_b);
- if (IsPrecise(operation)) {
- Decorate(value, spv::Decoration::NoContraction);
- }
- return {value, result_type};
- }
-
- template <Id (Module::*func)(Id, Id, Id, Id), Type result_type, Type type_a = result_type,
- Type type_b = type_a, Type type_c = type_b>
- Expression Ternary(Operation operation) {
- const Id type_def = GetTypeDefinition(result_type);
- const Id op_a = As(Visit(operation[0]), type_a);
- const Id op_b = As(Visit(operation[1]), type_b);
- const Id op_c = As(Visit(operation[2]), type_c);
-
- const Id value = (this->*func)(type_def, op_a, op_b, op_c);
- if (IsPrecise(operation)) {
- Decorate(value, spv::Decoration::NoContraction);
- }
- return {value, result_type};
- }
-
- template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type, Type type_a = result_type,
- Type type_b = type_a, Type type_c = type_b, Type type_d = type_c>
- Expression Quaternary(Operation operation) {
- const Id type_def = GetTypeDefinition(result_type);
- const Id op_a = As(Visit(operation[0]), type_a);
- const Id op_b = As(Visit(operation[1]), type_b);
- const Id op_c = As(Visit(operation[2]), type_c);
- const Id op_d = As(Visit(operation[3]), type_d);
-
- const Id value = (this->*func)(type_def, op_a, op_b, op_c, op_d);
- if (IsPrecise(operation)) {
- Decorate(value, spv::Decoration::NoContraction);
- }
- return {value, result_type};
- }
-
- Expression Assign(Operation operation) {
- const Node& dest = operation[0];
- const Node& src = operation[1];
-
- Expression target{};
- if (const auto gpr = std::get_if<GprNode>(&*dest)) {
- if (gpr->GetIndex() == Register::ZeroIndex) {
- // Writing to Register::ZeroIndex is a no-op, but we still have to visit its source
- // because it might have side effects.
- Visit(src);
- return {};
- }
- target = {registers.at(gpr->GetIndex()), Type::Float};
-
- } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) {
- const auto& buffer = abuf->GetBuffer();
- const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) {
- std::vector<Id> members;
- members.reserve(std::size(indices) + 1);
-
- if (buffer && IsOutputAttributeArray()) {
- members.push_back(AsUint(Visit(buffer)));
- }
- for (const u32 index : indices) {
- members.push_back(Constant(t_uint, index));
- }
- return OpAccessChain(pointer_type, composite, members);
- };
-
- target = [&]() -> Expression {
- const u32 element = abuf->GetElement();
- switch (const auto attribute = abuf->GetIndex(); attribute) {
- case Attribute::Index::Position: {
- const u32 index = out_indices.position.value();
- return {ArrayPass(t_out_float, out_vertex, {index, element}), Type::Float};
- }
- case Attribute::Index::LayerViewportPointSize:
- switch (element) {
- case 1: {
- if (!out_indices.layer) {
- return {};
- }
- const u32 index = out_indices.layer.value();
- return {AccessElement(t_out_int, out_vertex, index), Type::Int};
- }
- case 2: {
- if (!out_indices.viewport) {
- return {};
- }
- const u32 index = out_indices.viewport.value();
- return {AccessElement(t_out_int, out_vertex, index), Type::Int};
- }
- case 3: {
- const auto index = out_indices.point_size.value();
- return {AccessElement(t_out_float, out_vertex, index), Type::Float};
- }
- default:
- UNIMPLEMENTED_MSG("LayerViewportPoint element={}", abuf->GetElement());
- return {};
- }
- case Attribute::Index::ClipDistances0123: {
- const u32 index = out_indices.clip_distances.value();
- return {AccessElement(t_out_float, out_vertex, index, element), Type::Float};
- }
- case Attribute::Index::ClipDistances4567: {
- const u32 index = out_indices.clip_distances.value();
- return {AccessElement(t_out_float, out_vertex, index, element + 4),
- Type::Float};
- }
- default:
- if (IsGenericAttribute(attribute)) {
- const u8 offset = static_cast<u8>(static_cast<u8>(attribute) * 4 + element);
- const GenericVaryingDescription description = output_attributes.at(offset);
- const Id composite = description.id;
- std::vector<u32> indices;
- if (!description.is_scalar) {
- indices.push_back(element - description.first_element);
- }
- return {ArrayPass(t_out_float, composite, indices), Type::Float};
- }
- UNIMPLEMENTED_MSG("Unhandled output attribute: {}",
- static_cast<u32>(attribute));
- return {};
- }
- }();
-
- } else if (const auto patch = std::get_if<PatchNode>(&*dest)) {
- target = [&]() -> Expression {
- const u32 offset = patch->GetOffset();
- switch (offset) {
- case 0:
- case 1:
- case 2:
- case 3:
- return {AccessElement(t_out_float, tess_level_outer, offset % 4), Type::Float};
- case 4:
- case 5:
- return {AccessElement(t_out_float, tess_level_inner, offset % 4), Type::Float};
- }
- UNIMPLEMENTED_MSG("Unhandled patch output offset: {}", offset);
- return {};
- }();
-
- } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) {
- Id address = AsUint(Visit(lmem->GetAddress()));
- address = OpUDiv(t_uint, address, Constant(t_uint, 4));
- target = {OpAccessChain(t_prv_float, local_memory, address), Type::Float};
-
- } else if (const auto smem = std::get_if<SmemNode>(&*dest)) {
- target = {GetSharedMemoryPointer(*smem), Type::Uint};
-
- } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) {
- target = {GetGlobalMemoryPointer(*gmem), Type::Uint};
-
- } else if (const auto cv = std::get_if<CustomVarNode>(&*dest)) {
- target = {custom_variables.at(cv->GetIndex()), Type::Float};
-
- } else {
- UNIMPLEMENTED();
- }
-
- if (!target.id) {
- // On failure, target.id is left null; skip the store in that case.
- return {};
- }
-
- OpStore(target.id, As(Visit(src), target.type));
- return {};
- }
-
- template <u32 offset>
- Expression FCastHalf(Operation operation) {
- const Id value = AsHalfFloat(Visit(operation[0]));
- return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, offset)),
- Type::Float};
- }
-
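- // FSWZADD: builds a per-thread 2-bit selector from the swizzle immediate (operation[2])
- // shifted right by (thread_id & 3) * 2, then uses it to pick per-thread multipliers from
- // two constant LUTs, computing a * {-1, +1, -1, 0}[sel] + b * {-1, -1, +1, -1}[sel].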
- Expression FSwizzleAdd(Operation operation) {
- const Id minus = Constant(t_float, -1.0f);
- const Id plus = v_float_one;
- const Id zero = v_float_zero;
- const Id lut_a = ConstantComposite(t_float4, minus, plus, minus, zero);
- const Id lut_b = ConstantComposite(t_float4, minus, minus, plus, minus);
-
- Id mask = OpLoad(t_uint, thread_id);
- mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3));
- mask = OpShiftLeftLogical(t_uint, mask, Constant(t_uint, 1));
- mask = OpShiftRightLogical(t_uint, AsUint(Visit(operation[2])), mask);
- mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3));
-
- const Id modifier_a = OpVectorExtractDynamic(t_float, lut_a, mask);
- const Id modifier_b = OpVectorExtractDynamic(t_float, lut_b, mask);
-
- const Id op_a = OpFMul(t_float, AsFloat(Visit(operation[0])), modifier_a);
- const Id op_b = OpFMul(t_float, AsFloat(Visit(operation[1])), modifier_b);
- return {OpFAdd(t_float, op_a, op_b), Type::Float};
- }
-
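- // Negates each half component by multiplying with +/-1. The constants are raw bit
- // patterns: 0xbc00/0x3c00 when native f16 is available, otherwise the f32 encodings
- // 0xbf800000/0x3f800000 used by the two-float emulation path.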
- Expression HNegate(Operation operation) {
- const bool is_f16 = device.IsFloat16Supported();
- const Id minus_one = Constant(t_scalar_half, is_f16 ? 0xbc00 : 0xbf800000);
- const Id one = Constant(t_scalar_half, is_f16 ? 0x3c00 : 0x3f800000);
- const auto GetNegate = [&](std::size_t index) {
- return OpSelect(t_scalar_half, AsBool(Visit(operation[index])), minus_one, one);
- };
- const Id negation = OpCompositeConstruct(t_half, GetNegate(1), GetNegate(2));
- return {OpFMul(t_half, AsHalfFloat(Visit(operation[0])), negation), Type::HalfFloat};
- }
-
- Expression HClamp(Operation operation) {
- const auto Pack = [&](std::size_t index) {
- const Id scalar = GetHalfScalarFromFloat(AsFloat(Visit(operation[index])));
- return OpCompositeConstruct(t_half, scalar, scalar);
- };
- const Id value = AsHalfFloat(Visit(operation[0]));
- const Id min = Pack(1);
- const Id max = Pack(2);
-
- const Id clamped = OpFClamp(t_half, value, min, max);
- if (IsPrecise(operation)) {
- Decorate(clamped, spv::Decoration::NoContraction);
- }
- return {clamped, Type::HalfFloat};
- }
-
- Expression HCastFloat(Operation operation) {
- const Id value = GetHalfScalarFromFloat(AsFloat(Visit(operation[0])));
- return {OpCompositeConstruct(t_half, value, Constant(t_scalar_half, 0)), Type::HalfFloat};
- }
-
- Expression HUnpack(Operation operation) {
- Expression operand = Visit(operation[0]);
- const auto type = std::get<Tegra::Shader::HalfType>(operation.GetMeta());
- if (type == Tegra::Shader::HalfType::H0_H1) {
- return operand;
- }
- const auto value = [&] {
- switch (type) {
- case Tegra::Shader::HalfType::F32:
- return GetHalfScalarFromFloat(AsFloat(operand));
- case Tegra::Shader::HalfType::H0_H0:
- return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 0);
- case Tegra::Shader::HalfType::H1_H1:
- return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 1);
- default:
- UNREACHABLE();
- return ConstantNull(t_half);
- }
- }();
- return {OpCompositeConstruct(t_half, value, value), Type::HalfFloat};
- }
-
- Expression HMergeF32(Operation operation) {
- const Id value = AsHalfFloat(Visit(operation[0]));
- return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, 0)), Type::Float};
- }
-
- template <u32 offset>
- Expression HMergeHN(Operation operation) {
- const Id target = AsHalfFloat(Visit(operation[0]));
- const Id source = AsHalfFloat(Visit(operation[1]));
- const Id object = OpCompositeExtract(t_scalar_half, source, offset);
- return {OpCompositeInsert(t_half, object, target, offset), Type::HalfFloat};
- }
-
- Expression HPack2(Operation operation) {
- const Id low = GetHalfScalarFromFloat(AsFloat(Visit(operation[0])));
- const Id high = GetHalfScalarFromFloat(AsFloat(Visit(operation[1])));
- return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat};
- }
-
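- // OpIAddCarry yields a {sum, carry} struct where carry is 0 or 1; comparing the
- // carry member against zero produces the boolean carry flag.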
- Expression LogicalAddCarry(Operation operation) {
- const Id op_a = AsUint(Visit(operation[0]));
- const Id op_b = AsUint(Visit(operation[1]));
-
- const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
- const Id carry = OpCompositeExtract(t_uint, result, 1);
- return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool};
- }
-
- Expression LogicalAssign(Operation operation) {
- const Node& dest = operation[0];
- const Node& src = operation[1];
-
- Id target{};
- if (const auto pred = std::get_if<PredicateNode>(&*dest)) {
- ASSERT_MSG(!pred->IsNegated(), "Negating logical assignment");
-
- const auto index = pred->GetIndex();
- switch (index) {
- case Tegra::Shader::Pred::NeverExecute:
- case Tegra::Shader::Pred::UnusedIndex:
- // Writing to these predicates is a no-op
- return {};
- }
- target = predicates.at(index);
-
- } else if (const auto flag = std::get_if<InternalFlagNode>(&*dest)) {
- target = internal_flags.at(static_cast<u32>(flag->GetFlag()));
- }
-
- OpStore(target, AsBool(Visit(src)));
- return {};
- }
-
- Expression LogicalFOrdered(Operation operation) {
- // Emulate SPIR-V's OpOrdered
- const Id op_a = AsFloat(Visit(operation[0]));
- const Id op_b = AsFloat(Visit(operation[1]));
- const Id is_num_a = OpFOrdEqual(t_bool, op_a, op_a);
- const Id is_num_b = OpFOrdEqual(t_bool, op_b, op_b);
- return {OpLogicalAnd(t_bool, is_num_a, is_num_b), Type::Bool};
- }
-
- Expression LogicalFUnordered(Operation operation) {
- // Emulate SPIR-V's OpUnordered
- const Id op_a = AsFloat(Visit(operation[0]));
- const Id op_b = AsFloat(Visit(operation[1]));
- const Id is_nan_a = OpIsNan(t_bool, op_a);
- const Id is_nan_b = OpIsNan(t_bool, op_b);
- return {OpLogicalOr(t_bool, is_nan_a, is_nan_b), Type::Bool};
- }
-
- Id GetTextureSampler(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- ASSERT(!meta.sampler.is_buffer);
-
- const auto& entry = sampled_images.at(meta.sampler.index);
- Id sampler = entry.variable;
- if (meta.sampler.is_indexed) {
- const Id index = AsInt(Visit(meta.index));
- sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
- }
- return OpLoad(entry.sampler_type, sampler);
- }
-
- Id GetTextureImage(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const u32 index = meta.sampler.index;
- if (meta.sampler.is_buffer) {
- const auto& entry = uniform_texels.at(index);
- return OpLoad(entry.image_type, entry.image);
- } else {
- const auto& entry = sampled_images.at(index);
- return OpImage(entry.image_type, GetTextureSampler(operation));
- }
- }
-
- Id GetImage(Operation operation) {
- const auto& meta = std::get<MetaImage>(operation.GetMeta());
- const auto entry = images.at(meta.image.index);
- return OpLoad(entry.image_type, entry.image);
- }
-
- Id AssembleVector(const std::vector<Id>& coords, Type type) {
- const Id coords_type = GetTypeVectorDefinitionLut(type).at(coords.size() - 1);
- return coords.size() == 1 ? coords[0] : OpCompositeConstruct(coords_type, coords);
- }
-
- Id GetCoordinates(Operation operation, Type type) {
- std::vector<Id> coords;
- for (std::size_t i = 0; i < operation.GetOperandsCount(); ++i) {
- coords.push_back(As(Visit(operation[i]), type));
- }
- if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
- // Add array coordinate for textures
- if (meta->sampler.is_array) {
- Id array = AsInt(Visit(meta->array));
- if (type == Type::Float) {
- array = OpConvertSToF(t_float, array);
- }
- coords.push_back(array);
- }
- }
- return AssembleVector(coords, type);
- }
-
- Id GetOffsetCoordinates(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- std::vector<Id> coords;
- coords.reserve(meta.aoffi.size());
- for (const auto& coord : meta.aoffi) {
- coords.push_back(AsInt(Visit(coord)));
- }
- return AssembleVector(coords, Type::Int);
- }
-
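- // meta.derivates stores the derivatives interleaved as [dx0, dy0, dx1, dy1, ...];
- // split them into separate dx and dy vectors for the image operand's Grad argument.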
- std::pair<Id, Id> GetDerivatives(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const auto& derivatives = meta.derivates;
- ASSERT(derivatives.size() % 2 == 0);
-
- const std::size_t components = derivatives.size() / 2;
- std::vector<Id> dx, dy;
- dx.reserve(components);
- dy.reserve(components);
- for (std::size_t index = 0; index < components; ++index) {
- dx.push_back(AsFloat(Visit(derivatives.at(index * 2 + 0))));
- dy.push_back(AsFloat(Visit(derivatives.at(index * 2 + 1))));
- }
- return {AssembleVector(dx, Type::Float), AssembleVector(dy, Type::Float)};
- }
-
- Expression GetTextureElement(Operation operation, Id sample_value, Type type) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- const auto type_def = GetTypeDefinition(type);
- return {OpCompositeExtract(type_def, sample_value, meta.element), type};
- }
-
- Expression Texture(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-
- const bool can_implicit = stage == ShaderType::Fragment;
- const Id sampler = GetTextureSampler(operation);
- const Id coords = GetCoordinates(operation, Type::Float);
-
- std::vector<Id> operands;
- spv::ImageOperandsMask mask{};
- if (meta.bias) {
- mask = mask | spv::ImageOperandsMask::Bias;
- operands.push_back(AsFloat(Visit(meta.bias)));
- }
-
- if (!can_implicit) {
- mask = mask | spv::ImageOperandsMask::Lod;
- operands.push_back(v_float_zero);
- }
-
- if (!meta.aoffi.empty()) {
- mask = mask | spv::ImageOperandsMask::Offset;
- operands.push_back(GetOffsetCoordinates(operation));
- }
-
- if (meta.depth_compare) {
- // Depth sampling
- UNIMPLEMENTED_IF(meta.bias);
- const Id dref = AsFloat(Visit(meta.depth_compare));
- if (can_implicit) {
- return {
- OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, mask, operands),
- Type::Float};
- } else {
- return {
- OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
- Type::Float};
- }
- }
-
- Id texture;
- if (can_implicit) {
- texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands);
- } else {
- texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
- }
- return GetTextureElement(operation, texture, Type::Float);
- }
-
- Expression TextureLod(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-
- const Id sampler = GetTextureSampler(operation);
- const Id coords = GetCoordinates(operation, Type::Float);
- const Id lod = AsFloat(Visit(meta.lod));
-
- spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod;
- std::vector<Id> operands{lod};
-
- if (!meta.aoffi.empty()) {
- mask = mask | spv::ImageOperandsMask::Offset;
- operands.push_back(GetOffsetCoordinates(operation));
- }
-
- if (meta.sampler.is_shadow) {
- const Id dref = AsFloat(Visit(meta.depth_compare));
- return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
- Type::Float};
- }
- const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
- return GetTextureElement(operation, texture, Type::Float);
- }
-
- Expression TextureGather(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-
- const Id coords = GetCoordinates(operation, Type::Float);
-
- spv::ImageOperandsMask mask = spv::ImageOperandsMask::MaskNone;
- std::vector<Id> operands;
- Id texture{};
-
- if (!meta.aoffi.empty()) {
- mask = mask | spv::ImageOperandsMask::Offset;
- operands.push_back(GetOffsetCoordinates(operation));
- }
-
- if (meta.sampler.is_shadow) {
- texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
- AsFloat(Visit(meta.depth_compare)), mask, operands);
- } else {
- u32 component_value = 0;
- if (meta.component) {
- const auto component = std::get_if<ImmediateNode>(&*meta.component);
- ASSERT_MSG(component, "Component is not an immediate value");
- component_value = component->GetValue();
- }
- texture = OpImageGather(t_float4, GetTextureSampler(operation), coords,
- Constant(t_uint, component_value), mask, operands);
- }
- return GetTextureElement(operation, texture, Type::Float);
- }
-
- Expression TextureQueryDimensions(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- UNIMPLEMENTED_IF(!meta.aoffi.empty());
- UNIMPLEMENTED_IF(meta.depth_compare);
-
- const auto image_id = GetTextureImage(operation);
- if (meta.element == 3) {
- return {OpImageQueryLevels(t_int, image_id), Type::Int};
- }
-
- const Id lod = AsUint(Visit(operation[0]));
- const std::size_t coords_count = [&meta] {
- switch (const auto type = meta.sampler.type) {
- case Tegra::Shader::TextureType::Texture1D:
- return 1;
- case Tegra::Shader::TextureType::Texture2D:
- case Tegra::Shader::TextureType::TextureCube:
- return 2;
- case Tegra::Shader::TextureType::Texture3D:
- return 3;
- default:
- UNREACHABLE_MSG("Invalid texture type={}", type);
- return 2;
- }
- }();
-
- if (meta.element >= coords_count) {
- return {v_float_zero, Type::Float};
- }
-
- const std::array<Id, 3> types = {t_int, t_int2, t_int3};
- const Id sizes = OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod);
- const Id size = OpCompositeExtract(t_int, sizes, meta.element);
- return {size, Type::Int};
- }
-
- Expression TextureQueryLod(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- UNIMPLEMENTED_IF(!meta.aoffi.empty());
- UNIMPLEMENTED_IF(meta.depth_compare);
-
- if (meta.element >= 2) {
- UNREACHABLE_MSG("Invalid element");
- return {v_float_zero, Type::Float};
- }
- const auto sampler_id = GetTextureSampler(operation);
-
- const Id multiplier = Constant(t_float, 256.0f);
- const Id multipliers = ConstantComposite(t_float2, multiplier, multiplier);
-
- const Id coords = GetCoordinates(operation, Type::Float);
- Id size = OpImageQueryLod(t_float2, sampler_id, coords);
- size = OpFMul(t_float2, size, multipliers);
- size = OpConvertFToS(t_int2, size);
- return GetTextureElement(operation, size, Type::Int);
- }
-
- Expression TexelFetch(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- UNIMPLEMENTED_IF(meta.depth_compare);
-
- const Id image = GetTextureImage(operation);
- const Id coords = GetCoordinates(operation, Type::Int);
-
- spv::ImageOperandsMask mask = spv::ImageOperandsMask::MaskNone;
- std::vector<Id> operands;
- Id fetch;
-
- if (meta.lod && !meta.sampler.is_buffer) {
- mask = mask | spv::ImageOperandsMask::Lod;
- operands.push_back(AsInt(Visit(meta.lod)));
- }
-
- if (!meta.aoffi.empty()) {
- mask = mask | spv::ImageOperandsMask::Offset;
- operands.push_back(GetOffsetCoordinates(operation));
- }
-
- fetch = OpImageFetch(t_float4, image, coords, mask, operands);
- return GetTextureElement(operation, fetch, Type::Float);
- }
-
- Expression TextureGradient(Operation operation) {
- const auto& meta = std::get<MetaTexture>(operation.GetMeta());
- UNIMPLEMENTED_IF(!meta.aoffi.empty());
-
- const Id sampler = GetTextureSampler(operation);
- const Id coords = GetCoordinates(operation, Type::Float);
- const auto [dx, dy] = GetDerivatives(operation);
- const std::vector grad = {dx, dy};
-
- static constexpr auto mask = spv::ImageOperandsMask::Grad;
- const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, grad);
- return GetTextureElement(operation, texture, Type::Float);
- }
-
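- // Formatless image reads are optional in Vulkan (shaderStorageImageReadWithoutFormat);
- // when the device lacks support, return zero instead of emitting an invalid OpImageRead.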
- Expression ImageLoad(Operation operation) {
- if (!device.IsFormatlessImageLoadSupported()) {
- return {v_float_zero, Type::Float};
- }
-
- const auto& meta{std::get<MetaImage>(operation.GetMeta())};
-
- const Id coords = GetCoordinates(operation, Type::Int);
- const Id texel = OpImageRead(t_uint4, GetImage(operation), coords);
-
- return {OpCompositeExtract(t_uint, texel, meta.element), Type::Uint};
- }
-
- Expression ImageStore(Operation operation) {
- const auto meta{std::get<MetaImage>(operation.GetMeta())};
- std::vector<Id> colors;
- for (const auto& value : meta.values) {
- colors.push_back(AsUint(Visit(value)));
- }
-
- const Id coords = GetCoordinates(operation, Type::Int);
- const Id texel = OpCompositeConstruct(t_uint4, colors);
-
- OpImageWrite(GetImage(operation), coords, texel, {});
- return {};
- }
-
- template <Id (Module::*func)(Id, Id, Id, Id, Id)>
- Expression AtomicImage(Operation operation) {
- const auto& meta{std::get<MetaImage>(operation.GetMeta())};
- ASSERT(meta.values.size() == 1);
-
- const Id coordinate = GetCoordinates(operation, Type::Int);
- const Id image = images.at(meta.image.index).image;
- const Id sample = v_uint_zero;
- const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);
-
- const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
- const Id semantics = v_uint_zero;
- const Id value = AsUint(Visit(meta.values[0]));
- return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
- }
-
- template <Id (Module::*func)(Id, Id, Id, Id, Id)>
- Expression Atomic(Operation operation) {
- Id pointer;
- if (const auto smem = std::get_if<SmemNode>(&*operation[0])) {
- pointer = GetSharedMemoryPointer(*smem);
- } else if (const auto gmem = std::get_if<GmemNode>(&*operation[0])) {
- pointer = GetGlobalMemoryPointer(*gmem);
- } else {
- UNREACHABLE();
- return {v_float_zero, Type::Float};
- }
- const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
- const Id semantics = v_uint_zero;
- const Id value = AsUint(Visit(operation[1]));
-
- return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
- }
-
- template <Id (Module::*func)(Id, Id, Id, Id, Id)>
- Expression Reduce(Operation operation) {
- Atomic<func>(operation);
- return {};
- }
-
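- // Non-decompiled control flow is emulated with a trampoline: store the target PC in
- // jmp_to and branch to continue_label, where the block dispatch loop picks the next
- // label to execute.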
- Expression Branch(Operation operation) {
- const auto& target = std::get<ImmediateNode>(*operation[0]);
- OpStore(jmp_to, Constant(t_uint, target.GetValue()));
- OpBranch(continue_label);
- inside_branch = true;
- if (!conditional_branch_set) {
- AddLabel();
- }
- return {};
- }
-
- Expression BranchIndirect(Operation operation) {
- const Id op_a = AsUint(Visit(operation[0]));
-
- OpStore(jmp_to, op_a);
- OpBranch(continue_label);
- inside_branch = true;
- if (!conditional_branch_set) {
- AddLabel();
- }
- return {};
- }
-
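- // SSY/PBK targets live on a small function-local stack (see CreateFlowStack): push
- // writes the target at the current top slot and increments the top index.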
- Expression PushFlowStack(Operation operation) {
- const auto& target = std::get<ImmediateNode>(*operation[0]);
- const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
- const Id current = OpLoad(t_uint, flow_stack_top);
- const Id next = OpIAdd(t_uint, current, Constant(t_uint, 1));
- const Id access = OpAccessChain(t_func_uint, flow_stack, current);
-
- OpStore(access, Constant(t_uint, target.GetValue()));
- OpStore(flow_stack_top, next);
- return {};
- }
-
- Expression PopFlowStack(Operation operation) {
- const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
- const Id current = OpLoad(t_uint, flow_stack_top);
- const Id previous = OpISub(t_uint, current, Constant(t_uint, 1));
- const Id access = OpAccessChain(t_func_uint, flow_stack, previous);
- const Id target = OpLoad(t_uint, access);
-
- OpStore(flow_stack_top, previous);
- OpStore(jmp_to, target);
- OpBranch(continue_label);
- inside_branch = true;
- if (!conditional_branch_set) {
- AddLabel();
- }
- return {};
- }
-
- Id MaxwellToSpirvComparison(Maxwell::ComparisonOp compare_op, Id operand_1, Id operand_2) {
- using Compare = Maxwell::ComparisonOp;
- switch (compare_op) {
- case Compare::NeverOld:
- return v_false; // Never let the test pass
- case Compare::LessOld:
- return OpFOrdLessThan(t_bool, operand_1, operand_2);
- case Compare::EqualOld:
- return OpFOrdEqual(t_bool, operand_1, operand_2);
- case Compare::LessEqualOld:
- return OpFOrdLessThanEqual(t_bool, operand_1, operand_2);
- case Compare::GreaterOld:
- return OpFOrdGreaterThan(t_bool, operand_1, operand_2);
- case Compare::NotEqualOld:
- return OpFOrdNotEqual(t_bool, operand_1, operand_2);
- case Compare::GreaterEqualOld:
- return OpFOrdGreaterThanEqual(t_bool, operand_1, operand_2);
- default:
- UNREACHABLE();
- return v_true;
- }
- }
-
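- // Emits the fixed-function alpha test: compares the alpha output against the
- // specialized reference value and kills the fragment when the comparison fails.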
- void AlphaTest(Id pointer) {
- if (specialization.alpha_test_func == Maxwell::ComparisonOp::AlwaysOld) {
- return;
- }
- const Id true_label = OpLabel();
- const Id discard_label = OpLabel();
- const Id alpha_reference = Constant(t_float, specialization.alpha_test_ref);
- const Id alpha_value = OpLoad(t_float, pointer);
- const Id condition =
- MaxwellToSpirvComparison(specialization.alpha_test_func, alpha_value, alpha_reference);
-
- OpBranchConditional(condition, true_label, discard_label);
- AddLabel(discard_label);
- OpKill();
- AddLabel(true_label);
- }
-
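- // For vertex shaders specialized with [-1, 1] clip-space depth, remap z to Vulkan's
- // [0, 1] range: storing z' = (z + w) * 0.5 yields z'/w = (z/w + 1) / 2 after the
- // perspective division.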
- void PreExit() {
- if (stage == ShaderType::Vertex && specialization.ndc_minus_one_to_one) {
- const u32 position_index = out_indices.position.value();
- const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U);
- const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U);
- Id depth = OpLoad(t_float, z_pointer);
- depth = OpFAdd(t_float, depth, OpLoad(t_float, w_pointer));
- depth = OpFMul(t_float, depth, Constant(t_float, 0.5f));
- OpStore(z_pointer, depth);
- }
- if (stage == ShaderType::Fragment) {
- const auto SafeGetRegister = [this](u32 reg) {
- if (const auto it = registers.find(reg); it != registers.end()) {
- return OpLoad(t_float, it->second);
- }
- return v_float_zero;
- };
-
- UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0,
- "Sample mask write is unimplemented");
-
- // Write the color outputs using the data in the shader registers; disabled
- // render targets/components are skipped in the register assignment.
- u32 current_reg = 0;
- for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
- // TODO(Subv): Figure out how dual-source blending is configured in the Switch.
- for (u32 component = 0; component < 4; ++component) {
- if (!header.ps.IsColorComponentOutputEnabled(rt, component)) {
- continue;
- }
- const Id pointer = AccessElement(t_out_float, frag_colors[rt], component);
- OpStore(pointer, SafeGetRegister(current_reg));
- if (rt == 0 && component == 3) {
- AlphaTest(pointer);
- }
- ++current_reg;
- }
- }
- if (header.ps.omap.depth) {
- // The depth output is always 2 registers after the last color output, and
- // current_reg already points one past the last color register, so the depth
- // register is current_reg + 1.
- OpStore(frag_depth, SafeGetRegister(current_reg + 1));
- }
- }
- }
-
- Expression Exit(Operation) {
- PreExit();
- inside_branch = true;
- if (conditional_branch_set) {
- OpReturn();
- } else {
- const Id dummy = OpLabel();
- OpBranch(dummy);
- AddLabel(dummy);
- OpReturn();
- AddLabel();
- }
- return {};
- }
-
- Expression Discard(Operation) {
- inside_branch = true;
- if (conditional_branch_set) {
- OpKill();
- } else {
- const Id dummy = OpLabel();
- OpBranch(dummy);
- AddLabel(dummy);
- OpKill();
- AddLabel();
- }
- return {};
- }
-
- Expression EmitVertex(Operation) {
- OpEmitVertex();
- return {};
- }
-
- Expression EndPrimitive(Operation) {
- OpEndPrimitive();
- return {};
- }
-
- Expression InvocationId(Operation) {
- return {OpLoad(t_int, invocation_id), Type::Int};
- }
-
- Expression YNegate(Operation) {
- LOG_WARNING(Render_Vulkan, "(STUBBED)");
- return {Constant(t_float, 1.0f), Type::Float};
- }
-
- template <u32 element>
- Expression LocalInvocationId(Operation) {
- const Id id = OpLoad(t_uint3, local_invocation_id);
- return {OpCompositeExtract(t_uint, id, element), Type::Uint};
- }
-
- template <u32 element>
- Expression WorkGroupId(Operation) {
- const Id id = OpLoad(t_uint3, workgroup_id);
- return {OpCompositeExtract(t_uint, id, element), Type::Uint};
- }
-
- Expression BallotThread(Operation operation) {
- const Id predicate = AsBool(Visit(operation[0]));
- const Id ballot = OpSubgroupBallotKHR(t_uint4, predicate);
-
- if (!device.IsWarpSizePotentiallyBiggerThanGuest()) {
- // Guest-like devices can just return the first component of the ballot.
- return {OpCompositeExtract(t_uint, ballot, 0U), Type::Uint};
- }
-
- // The others have to return the uint that is local to the current thread.
- // For instance, a device with a warp size of 64 returns the upper uint when the
- // current thread index is 38.
- const Id tid = OpLoad(t_uint, thread_id);
- const Id thread_index = OpShiftRightLogical(t_uint, tid, Constant(t_uint, 5));
- return {OpVectorExtractDynamic(t_uint, ballot, thread_index), Type::Uint};
- }
-
- template <Id (Module::*func)(Id, Id)>
- Expression Vote(Operation operation) {
- // TODO(Rodrigo): Handle devices with different warp sizes
- const Id predicate = AsBool(Visit(operation[0]));
- return {(this->*func)(t_bool, predicate), Type::Bool};
- }
-
- Expression ThreadId(Operation) {
- return {OpLoad(t_uint, thread_id), Type::Uint};
- }
-
- template <std::size_t index>
- Expression ThreadMask(Operation) {
- // TODO(Rodrigo): Handle devices with different warp sizes
- const Id mask = thread_masks[index];
- return {OpLoad(t_uint, AccessElement(t_in_uint, mask, 0)), Type::Uint};
- }
-
- Expression ShuffleIndexed(Operation operation) {
- const Id value = AsFloat(Visit(operation[0]));
- const Id index = AsUint(Visit(operation[1]));
- return {OpSubgroupReadInvocationKHR(t_float, value, index), Type::Float};
- }
-
- Expression Barrier(Operation) {
- if (!ir.IsDecompiled()) {
- LOG_ERROR(Render_Vulkan, "OpBarrier used by shader is not decompiled");
- return {};
- }
-
- const auto scope = spv::Scope::Workgroup;
- const auto memory = spv::Scope::Workgroup;
- const auto semantics =
- spv::MemorySemanticsMask::WorkgroupMemory | spv::MemorySemanticsMask::AcquireRelease;
- OpControlBarrier(Constant(t_uint, static_cast<u32>(scope)),
- Constant(t_uint, static_cast<u32>(memory)),
- Constant(t_uint, static_cast<u32>(semantics)));
- return {};
- }
-
- template <spv::Scope scope>
- Expression MemoryBarrier(Operation) {
- const auto semantics =
- spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
- spv::MemorySemanticsMask::WorkgroupMemory |
- spv::MemorySemanticsMask::AtomicCounterMemory | spv::MemorySemanticsMask::ImageMemory;
-
- OpMemoryBarrier(Constant(t_uint, static_cast<u32>(scope)),
- Constant(t_uint, static_cast<u32>(semantics)));
- return {};
- }
-
- Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, std::string name) {
- const Id id = OpVariable(type, storage);
- Decorate(id, spv::Decoration::BuiltIn, static_cast<u32>(builtin));
- AddGlobalVariable(Name(id, std::move(name)));
- interfaces.push_back(id);
- return id;
- }
-
- Id DeclareInputBuiltIn(spv::BuiltIn builtin, Id type, std::string name) {
- return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name));
- }
-
- template <typename... Args>
- Id AccessElement(Id pointer_type, Id composite, Args... elements_) {
- std::vector<Id> members;
- auto elements = {elements_...};
- for (const auto element : elements) {
- members.push_back(Constant(t_uint, element));
- }
-
- return OpAccessChain(pointer_type, composite, members);
- }
-
- Id As(Expression expr, Type wanted_type) {
- switch (wanted_type) {
- case Type::Bool:
- return AsBool(expr);
- case Type::Bool2:
- return AsBool2(expr);
- case Type::Float:
- return AsFloat(expr);
- case Type::Int:
- return AsInt(expr);
- case Type::Uint:
- return AsUint(expr);
- case Type::HalfFloat:
- return AsHalfFloat(expr);
- default:
- UNREACHABLE();
- return expr.id;
- }
- }
-
- Id AsBool(Expression expr) {
- ASSERT(expr.type == Type::Bool);
- return expr.id;
- }
-
- Id AsBool2(Expression expr) {
- ASSERT(expr.type == Type::Bool2);
- return expr.id;
- }
-
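- // Without native f16 support, Type::HalfFloat is represented as a float2, so scalar
- // conversions below round-trip through OpPackHalf2x16/OpUnpackHalf2x16 on a packed uint.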
- Id AsFloat(Expression expr) {
- switch (expr.type) {
- case Type::Float:
- return expr.id;
- case Type::Int:
- case Type::Uint:
- return OpBitcast(t_float, expr.id);
- case Type::HalfFloat:
- if (device.IsFloat16Supported()) {
- return OpBitcast(t_float, expr.id);
- }
- return OpBitcast(t_float, OpPackHalf2x16(t_uint, expr.id));
- default:
- UNREACHABLE();
- return expr.id;
- }
- }
-
- Id AsInt(Expression expr) {
- switch (expr.type) {
- case Type::Int:
- return expr.id;
- case Type::Float:
- case Type::Uint:
- return OpBitcast(t_int, expr.id);
- case Type::HalfFloat:
- if (device.IsFloat16Supported()) {
- return OpBitcast(t_int, expr.id);
- }
- return OpPackHalf2x16(t_int, expr.id);
- default:
- UNREACHABLE();
- return expr.id;
- }
- }
-
- Id AsUint(Expression expr) {
- switch (expr.type) {
- case Type::Uint:
- return expr.id;
- case Type::Float:
- case Type::Int:
- return OpBitcast(t_uint, expr.id);
- case Type::HalfFloat:
- if (device.IsFloat16Supported()) {
- return OpBitcast(t_uint, expr.id);
- }
- return OpPackHalf2x16(t_uint, expr.id);
- default:
- UNREACHABLE();
- return expr.id;
- }
- }
-
- Id AsHalfFloat(Expression expr) {
- switch (expr.type) {
- case Type::HalfFloat:
- return expr.id;
- case Type::Float:
- case Type::Int:
- case Type::Uint:
- if (device.IsFloat16Supported()) {
- return OpBitcast(t_half, expr.id);
- }
- return OpUnpackHalf2x16(t_half, AsUint(expr));
- default:
- UNREACHABLE();
- return expr.id;
- }
- }
-
- Id GetHalfScalarFromFloat(Id value) {
- if (device.IsFloat16Supported()) {
- return OpFConvert(t_scalar_half, value);
- }
- return value;
- }
-
- Id GetFloatFromHalfScalar(Id value) {
- if (device.IsFloat16Supported()) {
- return OpFConvert(t_float, value);
- }
- return value;
- }
-
- AttributeType GetAttributeType(u32 location) const {
- if (stage != ShaderType::Vertex) {
- return {Type::Float, t_in_float, t_in_float4};
- }
- switch (specialization.attribute_types.at(location)) {
- case Maxwell::VertexAttribute::Type::SignedNorm:
- case Maxwell::VertexAttribute::Type::UnsignedNorm:
- case Maxwell::VertexAttribute::Type::UnsignedScaled:
- case Maxwell::VertexAttribute::Type::SignedScaled:
- case Maxwell::VertexAttribute::Type::Float:
- return {Type::Float, t_in_float, t_in_float4};
- case Maxwell::VertexAttribute::Type::SignedInt:
- return {Type::Int, t_in_int, t_in_int4};
- case Maxwell::VertexAttribute::Type::UnsignedInt:
- return {Type::Uint, t_in_uint, t_in_uint4};
- default:
- UNREACHABLE();
- return {Type::Float, t_in_float, t_in_float4};
- }
- }
-
- Id GetTypeDefinition(Type type) const {
- switch (type) {
- case Type::Bool:
- return t_bool;
- case Type::Bool2:
- return t_bool2;
- case Type::Float:
- return t_float;
- case Type::Int:
- return t_int;
- case Type::Uint:
- return t_uint;
- case Type::HalfFloat:
- return t_half;
- default:
- UNREACHABLE();
- return {};
- }
- }
-
- std::array<Id, 4> GetTypeVectorDefinitionLut(Type type) const {
- switch (type) {
- case Type::Float:
- return {t_float, t_float2, t_float3, t_float4};
- case Type::Int:
- return {t_int, t_int2, t_int3, t_int4};
- case Type::Uint:
- return {t_uint, t_uint2, t_uint3, t_uint4};
- default:
- UNIMPLEMENTED();
- return {};
- }
- }
-
- std::tuple<Id, Id> CreateFlowStack() {
- // TODO(Rodrigo): Figure out the actual depth of the flow stack; for now it seems
- // unlikely that shaders will use 20 nested SSYs and PBKs.
- constexpr u32 FLOW_STACK_SIZE = 20;
- constexpr auto storage_class = spv::StorageClass::Function;
-
- const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE));
- const Id stack = OpVariable(TypePointer(storage_class, flow_stack_type), storage_class,
- ConstantNull(flow_stack_type));
- const Id top = OpVariable(t_func_uint, storage_class, Constant(t_uint, 0));
- AddLocalVariable(stack);
- AddLocalVariable(top);
- return std::tie(stack, top);
- }
-
- std::pair<Id, Id> GetFlowStack(Operation operation) {
- const auto stack_class = std::get<MetaStackClass>(operation.GetMeta());
- switch (stack_class) {
- case MetaStackClass::Ssy:
- return {ssy_flow_stack, ssy_flow_stack_top};
- case MetaStackClass::Pbk:
- return {pbk_flow_stack, pbk_flow_stack_top};
- }
- UNREACHABLE();
- return {};
- }
-
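- // Global memory is bound as an SSBO of uints; the element index is the byte distance
- // from the region's base address divided by 4 (u32 words).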
- Id GetGlobalMemoryPointer(const GmemNode& gmem) {
- const Id real = AsUint(Visit(gmem.GetRealAddress()));
- const Id base = AsUint(Visit(gmem.GetBaseAddress()));
- const Id diff = OpISub(t_uint, real, base);
- const Id offset = OpShiftRightLogical(t_uint, diff, Constant(t_uint, 2));
- const Id buffer = global_buffers.at(gmem.GetDescriptor());
- return OpAccessChain(t_gmem_uint, buffer, Constant(t_uint, 0), offset);
- }
-
- Id GetSharedMemoryPointer(const SmemNode& smem) {
- ASSERT(stage == ShaderType::Compute);
- Id address = AsUint(Visit(smem.GetAddress()));
- address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U));
- return OpAccessChain(t_smem_uint, shared_memory, address);
- }
-
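- // Dispatch table indexed by OperationCode; entry order must match the enum exactly,
- // and the static_assert below verifies only that the count matches.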
- static constexpr std::array operation_decompilers = {
- &SPIRVDecompiler::Assign,
-
- &SPIRVDecompiler::Ternary<&Module::OpSelect, Type::Float, Type::Bool, Type::Float,
- Type::Float>,
-
- &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFMul, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFDiv, Type::Float>,
- &SPIRVDecompiler::Ternary<&Module::OpFma, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>,
- &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>,
- &SPIRVDecompiler::FCastHalf<0>,
- &SPIRVDecompiler::FCastHalf<1>,
- &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpSin, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpExp2, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpLog2, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpInverseSqrt, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpSqrt, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpRoundEven, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpFloor, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpCeil, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpTrunc, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpConvertSToF, Type::Float, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpConvertUToF, Type::Float, Type::Uint>,
- &SPIRVDecompiler::FSwizzleAdd,
-
- &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSDiv, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpSNegate, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpSAbs, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSMin, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSMax, Type::Int>,
-
- &SPIRVDecompiler::Unary<&Module::OpConvertFToS, Type::Int, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Int, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Int, Type::Int, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Int, Type::Int, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Int, Type::Int, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpNot, Type::Int>,
- &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Int>,
- &SPIRVDecompiler::Ternary<&Module::OpBitFieldSExtract, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Int>,
- &SPIRVDecompiler::Unary<&Module::OpFindSMsb, Type::Int>,
-
- &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpUDiv, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpUMin, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpUMax, Type::Uint>,
- &SPIRVDecompiler::Unary<&Module::OpConvertFToU, Type::Uint, Type::Float>,
- &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Uint, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Uint>,
- &SPIRVDecompiler::Unary<&Module::OpNot, Type::Uint>,
- &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Uint>,
- &SPIRVDecompiler::Ternary<&Module::OpBitFieldUExtract, Type::Uint>,
- &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Uint>,
- &SPIRVDecompiler::Unary<&Module::OpFindUMsb, Type::Uint>,
-
- &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFMul, Type::HalfFloat>,
- &SPIRVDecompiler::Ternary<&Module::OpFma, Type::HalfFloat>,
- &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>,
- &SPIRVDecompiler::HNegate,
- &SPIRVDecompiler::HClamp,
- &SPIRVDecompiler::HCastFloat,
- &SPIRVDecompiler::HUnpack,
- &SPIRVDecompiler::HMergeF32,
- &SPIRVDecompiler::HMergeHN<0>,
- &SPIRVDecompiler::HMergeHN<1>,
- &SPIRVDecompiler::HPack2,
-
- &SPIRVDecompiler::LogicalAssign,
- &SPIRVDecompiler::Binary<&Module::OpLogicalAnd, Type::Bool>,
- &SPIRVDecompiler::Binary<&Module::OpLogicalOr, Type::Bool>,
- &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>,
- &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>,
- &SPIRVDecompiler::Binary<&Module::OpVectorExtractDynamic, Type::Bool, Type::Bool2,
- Type::Uint>,
- &SPIRVDecompiler::Unary<&Module::OpAll, Type::Bool, Type::Bool2>,
-
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::LogicalFOrdered,
- &SPIRVDecompiler::LogicalFUnordered,
- &SPIRVDecompiler::Binary<&Module::OpFUnordLessThan, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFUnordEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFUnordLessThanEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFUnordGreaterThan, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFUnordNotEqual, Type::Bool, Type::Float>,
- &SPIRVDecompiler::Binary<&Module::OpFUnordGreaterThanEqual, Type::Bool, Type::Float>,
-
- &SPIRVDecompiler::Binary<&Module::OpSLessThan, Type::Bool, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSLessThanEqual, Type::Bool, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSGreaterThan, Type::Bool, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Int>,
- &SPIRVDecompiler::Binary<&Module::OpSGreaterThanEqual, Type::Bool, Type::Int>,
-
- &SPIRVDecompiler::Binary<&Module::OpULessThan, Type::Bool, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpULessThanEqual, Type::Bool, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpUGreaterThan, Type::Bool, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>,
- &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>,
-
- &SPIRVDecompiler::LogicalAddCarry,
-
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>,
- // TODO(Rodrigo): Should these use the OpFUnord* variants?
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>,
- &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>,
-
- &SPIRVDecompiler::Texture,
- &SPIRVDecompiler::TextureLod,
- &SPIRVDecompiler::TextureGather,
- &SPIRVDecompiler::TextureQueryDimensions,
- &SPIRVDecompiler::TextureQueryLod,
- &SPIRVDecompiler::TexelFetch,
- &SPIRVDecompiler::TextureGradient,
-
- &SPIRVDecompiler::ImageLoad,
- &SPIRVDecompiler::ImageStore,
- &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>,
- &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>,
- &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>,
- &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>,
- &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>,
-
- &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>,
-
- &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>,
- &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>,
-
- &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicUMin>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicUMax>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>,
-
- &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicSMin>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicSMax>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>,
- &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>,
-
- &SPIRVDecompiler::Branch,
- &SPIRVDecompiler::BranchIndirect,
- &SPIRVDecompiler::PushFlowStack,
- &SPIRVDecompiler::PopFlowStack,
- &SPIRVDecompiler::Exit,
- &SPIRVDecompiler::Discard,
-
- &SPIRVDecompiler::EmitVertex,
- &SPIRVDecompiler::EndPrimitive,
-
- &SPIRVDecompiler::InvocationId,
- &SPIRVDecompiler::YNegate,
- &SPIRVDecompiler::LocalInvocationId<0>,
- &SPIRVDecompiler::LocalInvocationId<1>,
- &SPIRVDecompiler::LocalInvocationId<2>,
- &SPIRVDecompiler::WorkGroupId<0>,
- &SPIRVDecompiler::WorkGroupId<1>,
- &SPIRVDecompiler::WorkGroupId<2>,
-
- &SPIRVDecompiler::BallotThread,
- &SPIRVDecompiler::Vote<&Module::OpSubgroupAllKHR>,
- &SPIRVDecompiler::Vote<&Module::OpSubgroupAnyKHR>,
- &SPIRVDecompiler::Vote<&Module::OpSubgroupAllEqualKHR>,
-
- &SPIRVDecompiler::ThreadId,
- &SPIRVDecompiler::ThreadMask<0>, // Eq
- &SPIRVDecompiler::ThreadMask<1>, // Ge
- &SPIRVDecompiler::ThreadMask<2>, // Gt
- &SPIRVDecompiler::ThreadMask<3>, // Le
- &SPIRVDecompiler::ThreadMask<4>, // Lt
- &SPIRVDecompiler::ShuffleIndexed,
-
- &SPIRVDecompiler::Barrier,
- &SPIRVDecompiler::MemoryBarrier<spv::Scope::Workgroup>,
- &SPIRVDecompiler::MemoryBarrier<spv::Scope::Device>,
- };
- static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
-
- const Device& device;
- const ShaderIR& ir;
- const ShaderType stage;
- const Tegra::Shader::Header header;
- const Registry& registry;
- const Specialization& specialization;
- std::unordered_map<u8, VaryingTFB> transform_feedback;
-
- const Id t_void = Name(TypeVoid(), "void");
-
- const Id t_bool = Name(TypeBool(), "bool");
- const Id t_bool2 = Name(TypeVector(t_bool, 2), "bool2");
-
- const Id t_int = Name(TypeInt(32, true), "int");
- const Id t_int2 = Name(TypeVector(t_int, 2), "int2");
- const Id t_int3 = Name(TypeVector(t_int, 3), "int3");
- const Id t_int4 = Name(TypeVector(t_int, 4), "int4");
-
- const Id t_uint = Name(TypeInt(32, false), "uint");
- const Id t_uint2 = Name(TypeVector(t_uint, 2), "uint2");
- const Id t_uint3 = Name(TypeVector(t_uint, 3), "uint3");
- const Id t_uint4 = Name(TypeVector(t_uint, 4), "uint4");
-
- const Id t_float = Name(TypeFloat(32), "float");
- const Id t_float2 = Name(TypeVector(t_float, 2), "float2");
- const Id t_float3 = Name(TypeVector(t_float, 3), "float3");
- const Id t_float4 = Name(TypeVector(t_float, 4), "float4");
-
- const Id t_prv_bool = Name(TypePointer(spv::StorageClass::Private, t_bool), "prv_bool");
- const Id t_prv_float = Name(TypePointer(spv::StorageClass::Private, t_float), "prv_float");
-
- const Id t_func_uint = Name(TypePointer(spv::StorageClass::Function, t_uint), "func_uint");
-
- const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool");
- const Id t_in_int = Name(TypePointer(spv::StorageClass::Input, t_int), "in_int");
- const Id t_in_int4 = Name(TypePointer(spv::StorageClass::Input, t_int4), "in_int4");
- const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint");
- const Id t_in_uint3 = Name(TypePointer(spv::StorageClass::Input, t_uint3), "in_uint3");
- const Id t_in_uint4 = Name(TypePointer(spv::StorageClass::Input, t_uint4), "in_uint4");
- const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float");
- const Id t_in_float2 = Name(TypePointer(spv::StorageClass::Input, t_float2), "in_float2");
- const Id t_in_float3 = Name(TypePointer(spv::StorageClass::Input, t_float3), "in_float3");
- const Id t_in_float4 = Name(TypePointer(spv::StorageClass::Input, t_float4), "in_float4");
-
- const Id t_out_int = Name(TypePointer(spv::StorageClass::Output, t_int), "out_int");
-
- const Id t_out_float = Name(TypePointer(spv::StorageClass::Output, t_float), "out_float");
- const Id t_out_float4 = Name(TypePointer(spv::StorageClass::Output, t_float4), "out_float4");
-
- const Id t_cbuf_float = TypePointer(spv::StorageClass::Uniform, t_float);
- const Id t_cbuf_std140 = Decorate(
- Name(TypeArray(t_float4, Constant(t_uint, MaxConstBufferElements)), "CbufStd140Array"),
- spv::Decoration::ArrayStride, 16U);
- const Id t_cbuf_scalar = Decorate(
- Name(TypeArray(t_float, Constant(t_uint, MaxConstBufferFloats)), "CbufScalarArray"),
- spv::Decoration::ArrayStride, 4U);
- const Id t_cbuf_std140_struct = MemberDecorate(
- Decorate(TypeStruct(t_cbuf_std140), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
- const Id t_cbuf_scalar_struct = MemberDecorate(
- Decorate(TypeStruct(t_cbuf_scalar), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
- const Id t_cbuf_std140_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_std140_struct);
- const Id t_cbuf_scalar_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_scalar_struct);
-
- Id t_smem_uint{};
-
- const Id t_gmem_uint = TypePointer(spv::StorageClass::StorageBuffer, t_uint);
- const Id t_gmem_array =
- Name(Decorate(TypeRuntimeArray(t_uint), spv::Decoration::ArrayStride, 4U), "GmemArray");
- const Id t_gmem_struct = MemberDecorate(
- Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
- const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct);
-
- const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint);
-
- const Id v_float_zero = Constant(t_float, 0.0f);
- const Id v_float_one = Constant(t_float, 1.0f);
- const Id v_uint_zero = Constant(t_uint, 0);
-
- // Nvidia uses these defaults for varyings (e.g. position and generic attributes)
- const Id v_varying_default =
- ConstantComposite(t_float4, v_float_zero, v_float_zero, v_float_zero, v_float_one);
-
- const Id v_true = ConstantTrue(t_bool);
- const Id v_false = ConstantFalse(t_bool);
-
- Id t_scalar_half{};
- Id t_half{};
-
- Id out_vertex{};
- Id in_vertex{};
- std::map<u32, Id> registers;
- std::map<u32, Id> custom_variables;
- std::map<Tegra::Shader::Pred, Id> predicates;
- std::map<u32, Id> flow_variables;
- Id local_memory{};
- Id shared_memory{};
- std::array<Id, INTERNAL_FLAGS_COUNT> internal_flags{};
- std::map<Attribute::Index, Id> input_attributes;
- std::unordered_map<u8, GenericVaryingDescription> output_attributes;
- std::map<u32, Id> constant_buffers;
- std::map<GlobalMemoryBase, Id> global_buffers;
- std::map<u32, TexelBuffer> uniform_texels;
- std::map<u32, SampledImage> sampled_images;
- std::map<u32, StorageImage> images;
-
- std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
- Id instance_index{};
- Id vertex_index{};
- Id base_instance{};
- Id base_vertex{};
- Id frag_depth{};
- Id frag_coord{};
- Id front_facing{};
- Id point_coord{};
- Id tess_level_outer{};
- Id tess_level_inner{};
- Id tess_coord{};
- Id invocation_id{};
- Id workgroup_id{};
- Id local_invocation_id{};
- Id thread_id{};
- std::array<Id, 5> thread_masks{}; // eq, ge, gt, le, lt
-
- VertexIndices in_indices;
- VertexIndices out_indices;
-
- std::vector<Id> interfaces;
-
- Id jmp_to{};
- Id ssy_flow_stack_top{};
- Id pbk_flow_stack_top{};
- Id ssy_flow_stack{};
- Id pbk_flow_stack{};
- Id continue_label{};
- std::map<u32, Id> labels;
-
- bool conditional_branch_set{};
- bool inside_branch{};
-};
-
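-// Visitor that lowers control-flow Expr trees into boolean SPIR-V values.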
-class ExprDecompiler {
-public:
- explicit ExprDecompiler(SPIRVDecompiler& decomp_) : decomp{decomp_} {}
-
- Id operator()(const ExprAnd& expr) {
- const Id type_def = decomp.GetTypeDefinition(Type::Bool);
- const Id op1 = Visit(expr.operand1);
- const Id op2 = Visit(expr.operand2);
- return decomp.OpLogicalAnd(type_def, op1, op2);
- }
-
- Id operator()(const ExprOr& expr) {
- const Id type_def = decomp.GetTypeDefinition(Type::Bool);
- const Id op1 = Visit(expr.operand1);
- const Id op2 = Visit(expr.operand2);
- return decomp.OpLogicalOr(type_def, op1, op2);
- }
-
- Id operator()(const ExprNot& expr) {
- const Id type_def = decomp.GetTypeDefinition(Type::Bool);
- const Id op1 = Visit(expr.operand1);
- return decomp.OpLogicalNot(type_def, op1);
- }
-
- Id operator()(const ExprPredicate& expr) {
- const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate);
- return decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred));
- }
-
- Id operator()(const ExprCondCode& expr) {
- return decomp.AsBool(decomp.Visit(decomp.ir.GetConditionCode(expr.cc)));
- }
-
- Id operator()(const ExprVar& expr) {
- return decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index));
- }
-
- Id operator()(const ExprBoolean& expr) {
- return expr.value ? decomp.v_true : decomp.v_false;
- }
-
- Id operator()(const ExprGprEqual& expr) {
- const Id target = decomp.Constant(decomp.t_uint, expr.value);
- Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr));
- gpr = decomp.OpBitcast(decomp.t_uint, gpr);
- return decomp.OpIEqual(decomp.t_bool, gpr, target);
- }
-
- Id Visit(const Expr& node) {
- return std::visit(*this, *node);
- }
-
-private:
- SPIRVDecompiler& decomp;
-};
-
-class ASTDecompiler {
-public:
- explicit ASTDecompiler(SPIRVDecompiler& decomp_) : decomp{decomp_} {}
-
- void operator()(const ASTProgram& ast) {
- ASTNode current = ast.nodes.GetFirst();
- while (current) {
- Visit(current);
- current = current->GetNext();
- }
- }
-
- void operator()(const ASTIfThen& ast) {
- ExprDecompiler expr_parser{decomp};
- const Id condition = expr_parser.Visit(ast.condition);
- const Id then_label = decomp.OpLabel();
- const Id endif_label = decomp.OpLabel();
- decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
- decomp.OpBranchConditional(condition, then_label, endif_label);
- decomp.AddLabel(then_label);
- ASTNode current = ast.nodes.GetFirst();
- while (current) {
- Visit(current);
- current = current->GetNext();
- }
- decomp.OpBranch(endif_label);
- decomp.AddLabel(endif_label);
- }
-
- void operator()([[maybe_unused]] const ASTIfElse& ast) {
- UNREACHABLE();
- }
-
- void operator()([[maybe_unused]] const ASTBlockEncoded& ast) {
- UNREACHABLE();
- }
-
- void operator()(const ASTBlockDecoded& ast) {
- decomp.VisitBasicBlock(ast.nodes);
- }
-
- void operator()(const ASTVarSet& ast) {
- ExprDecompiler expr_parser{decomp};
- const Id condition = expr_parser.Visit(ast.condition);
- decomp.OpStore(decomp.flow_variables.at(ast.index), condition);
- }
-
- void operator()([[maybe_unused]] const ASTLabel& ast) {
- // Do nothing
- }
-
- void operator()([[maybe_unused]] const ASTGoto& ast) {
- UNREACHABLE();
- }
-
- void operator()(const ASTDoWhile& ast) {
- const Id loop_label = decomp.OpLabel();
- const Id endloop_label = decomp.OpLabel();
- const Id loop_start_block = decomp.OpLabel();
- const Id loop_continue_block = decomp.OpLabel();
- current_loop_exit = endloop_label;
- decomp.OpBranch(loop_label);
- decomp.AddLabel(loop_label);
- decomp.OpLoopMerge(endloop_label, loop_continue_block, spv::LoopControlMask::MaskNone);
- decomp.OpBranch(loop_start_block);
- decomp.AddLabel(loop_start_block);
- ASTNode current = ast.nodes.GetFirst();
- while (current) {
- Visit(current);
- current = current->GetNext();
- }
- decomp.OpBranch(loop_continue_block);
- decomp.AddLabel(loop_continue_block);
- ExprDecompiler expr_parser{decomp};
- const Id condition = expr_parser.Visit(ast.condition);
- decomp.OpBranchConditional(condition, loop_label, endloop_label);
- decomp.AddLabel(endloop_label);
- }
-
- void operator()(const ASTReturn& ast) {
- if (!VideoCommon::Shader::ExprIsTrue(ast.condition)) {
- ExprDecompiler expr_parser{decomp};
- const Id condition = expr_parser.Visit(ast.condition);
- const Id then_label = decomp.OpLabel();
- const Id endif_label = decomp.OpLabel();
- decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
- decomp.OpBranchConditional(condition, then_label, endif_label);
- decomp.AddLabel(then_label);
- if (ast.kills) {
- decomp.OpKill();
- } else {
- decomp.PreExit();
- decomp.OpReturn();
- }
- decomp.AddLabel(endif_label);
- } else {
- const Id next_block = decomp.OpLabel();
- decomp.OpBranch(next_block);
- decomp.AddLabel(next_block);
- if (ast.kills) {
- decomp.OpKill();
- } else {
- decomp.PreExit();
- decomp.OpReturn();
- }
- decomp.AddLabel(decomp.OpLabel());
- }
- }
-
- void operator()(const ASTBreak& ast) {
- if (!VideoCommon::Shader::ExprIsTrue(ast.condition)) {
- ExprDecompiler expr_parser{decomp};
- const Id condition = expr_parser.Visit(ast.condition);
- const Id then_label = decomp.OpLabel();
- const Id endif_label = decomp.OpLabel();
- decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
- decomp.OpBranchConditional(condition, then_label, endif_label);
- decomp.AddLabel(then_label);
- decomp.OpBranch(current_loop_exit);
- decomp.AddLabel(endif_label);
- } else {
- const Id next_block = decomp.OpLabel();
- decomp.OpBranch(next_block);
- decomp.AddLabel(next_block);
- decomp.OpBranch(current_loop_exit);
- decomp.AddLabel(decomp.OpLabel());
- }
- }
-
- void Visit(const ASTNode& node) {
- std::visit(*this, *node->GetInnerData());
- }
-
-private:
- SPIRVDecompiler& decomp;
- Id current_loop_exit{};
-};
-
-void SPIRVDecompiler::DecompileAST() {
- const u32 num_flow_variables = ir.GetASTNumVariables();
- for (u32 i = 0; i < num_flow_variables; i++) {
- const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false);
- Name(id, fmt::format("flow_var_{}", i));
- flow_variables.emplace(i, AddGlobalVariable(id));
- }
-
- DefinePrologue();
-
- const ASTNode program = ir.GetASTProgram();
- ASTDecompiler decompiler{*this};
- decompiler.Visit(program);
-
- const Id next_block = OpLabel();
- OpBranch(next_block);
- AddLabel(next_block);
-}
-
-} // Anonymous namespace
-
-ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
- ShaderEntries entries;
- for (const auto& cbuf : ir.GetConstantBuffers()) {
- entries.const_buffers.emplace_back(cbuf.second, cbuf.first);
- }
- for (const auto& [base, usage] : ir.GetGlobalMemory()) {
- entries.global_buffers.emplace_back(GlobalBufferEntry{
- .cbuf_index = base.cbuf_index,
- .cbuf_offset = base.cbuf_offset,
- .is_written = usage.is_written,
- });
- }
- for (const auto& sampler : ir.GetSamplers()) {
- if (sampler.is_buffer) {
- entries.uniform_texels.emplace_back(sampler);
- } else {
- entries.samplers.emplace_back(sampler);
- }
- }
- for (const auto& image : ir.GetImages()) {
- if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
- entries.storage_texels.emplace_back(image);
- } else {
- entries.images.emplace_back(image);
- }
- }
- for (const auto& attribute : ir.GetInputAttributes()) {
- if (IsGenericAttribute(attribute)) {
- entries.attributes.insert(GetGenericAttributeLocation(attribute));
- }
- }
- for (const auto& buffer : entries.const_buffers) {
- entries.enabled_uniform_buffers |= 1U << buffer.GetIndex();
- }
- entries.clip_distances = ir.GetClipDistances();
- entries.shader_length = ir.GetLength();
- entries.uses_warps = ir.UsesWarps();
- return entries;
-}
-
-std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
- ShaderType stage, const VideoCommon::Shader::Registry& registry,
- const Specialization& specialization) {
- return SPIRVDecompiler(device, ir, stage, registry, specialization).Assemble();
-}
-
-} // namespace Vulkan
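
The removed ExprDecompiler and ASTDecompiler above are plain std::visit visitors: each operator() overload handles one alternative of the expression/AST variant and emits the matching SPIR-V. A minimal, self-contained sketch of that dispatch shape, using illustrative types rather than the yuzu definitions:

#include <memory>
#include <variant>

struct Expr;
using ExprPtr = std::shared_ptr<Expr>;

struct ExprBoolean { bool value; };
struct ExprNot { ExprPtr operand; };
struct ExprAnd { ExprPtr operand1, operand2; };

struct Expr {
    std::variant<ExprBoolean, ExprNot, ExprAnd> data;
};

struct Evaluator {
    bool operator()(const ExprBoolean& e) const { return e.value; }
    bool operator()(const ExprNot& e) const { return !Visit(e.operand); }
    bool operator()(const ExprAnd& e) const {
        return Visit(e.operand1) && Visit(e.operand2);
    }
    // std::visit selects the overload for the active alternative, exactly
    // like ExprDecompiler::Visit did before emitting OpLogical* instructions.
    bool Visit(const ExprPtr& node) const { return std::visit(*this, node->data); }
};
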
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
deleted file mode 100644
index 5d94132a5..000000000
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <set>
-#include <vector>
-
-#include "common/common_types.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/engines/shader_type.h"
-#include "video_core/shader/registry.h"
-#include "video_core/shader/shader_ir.h"
-
-namespace Vulkan {
-
-class Device;
-
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using UniformTexelEntry = VideoCommon::Shader::SamplerEntry;
-using SamplerEntry = VideoCommon::Shader::SamplerEntry;
-using StorageTexelEntry = VideoCommon::Shader::ImageEntry;
-using ImageEntry = VideoCommon::Shader::ImageEntry;
-
-constexpr u32 DESCRIPTOR_SET = 0;
-
-class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
-public:
- explicit constexpr ConstBufferEntry(const ConstBuffer& entry_, u32 index_)
- : ConstBuffer{entry_}, index{index_} {}
-
- constexpr u32 GetIndex() const {
- return index;
- }
-
-private:
- u32 index{};
-};
-
-struct GlobalBufferEntry {
- u32 cbuf_index{};
- u32 cbuf_offset{};
- bool is_written{};
-};
-
-struct ShaderEntries {
- u32 NumBindings() const {
- return static_cast<u32>(const_buffers.size() + global_buffers.size() +
- uniform_texels.size() + samplers.size() + storage_texels.size() +
- images.size());
- }
-
- std::vector<ConstBufferEntry> const_buffers;
- std::vector<GlobalBufferEntry> global_buffers;
- std::vector<UniformTexelEntry> uniform_texels;
- std::vector<SamplerEntry> samplers;
- std::vector<StorageTexelEntry> storage_texels;
- std::vector<ImageEntry> images;
- std::set<u32> attributes;
- std::array<bool, Maxwell::NumClipDistances> clip_distances{};
- std::size_t shader_length{};
- u32 enabled_uniform_buffers{};
- bool uses_warps{};
-};
-
-struct Specialization final {
- u32 base_binding{};
-
- // Compute specific
- std::array<u32, 3> workgroup_size{};
- u32 shared_memory_size{};
-
- // Graphics specific
- std::optional<float> point_size;
- std::bitset<Maxwell::NumVertexAttributes> enabled_attributes;
- std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
- bool ndc_minus_one_to_one{};
- bool early_fragment_tests{};
- float alpha_test_ref{};
- Maxwell::ComparisonOp alpha_test_func{};
-};
-// Old gcc versions don't consider this trivially copyable.
-// static_assert(std::is_trivially_copyable_v<Specialization>);
-
-struct SPIRVShader {
- std::vector<u32> code;
- ShaderEntries entries;
-};
-
-ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir);
-
-std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
- Tegra::Engines::ShaderType stage,
- const VideoCommon::Shader::Registry& registry,
- const Specialization& specialization);
-
-} // namespace Vulkan
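
The commented-out trivially-copyable assertion above exists because keys like Specialization are typically compared and hashed byte-wise when used for pipeline cache lookups. A hedged sketch of that pattern, assuming Common::CityHash64 from yuzu's common library; HashSpecialization itself is a hypothetical helper:

#include "common/cityhash.h"

// Byte-wise hashing is only sound for trivially copyable, zero-initialized
// structs (padding included) — hence the assertion the old compilers rejected.
size_t HashSpecialization(const Specialization& s) {
    return static_cast<size_t>(
        Common::CityHash64(reinterpret_cast<const char*>(&s), sizeof(s)));
}
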
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 0412b5234..555b12ed7 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -91,7 +91,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
.flags = 0,
.size = STREAM_BUFFER_SIZE,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
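
Adding VK_BUFFER_USAGE_STORAGE_BUFFER_BIT lets a staging allocation be bound directly as a storage buffer, presumably for compute passes such as the ASTC decoder, rather than requiring an intermediate copy. A hedged sketch of such a binding; set, size, and staging_ref are placeholders:

const VkDescriptorBufferInfo buffer_info{
    .buffer = staging_ref.buffer,
    .offset = staging_ref.offset,
    .range = size,
};
const VkWriteDescriptorSet write{
    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
    .pNext = nullptr,
    .dstSet = set,
    .dstBinding = 0,
    .dstArrayElement = 0,
    .descriptorCount = 1,
    .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
    .pImageInfo = nullptr,
    .pBufferInfo = &buffer_info,
    .pTexelBufferView = nullptr,
};
// vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
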
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 956f86845..e3b7dd61c 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -29,9 +29,10 @@ using Flags = Maxwell3D::DirtyState::Flags;
Flags MakeInvalidationFlags() {
static constexpr int INVALIDATION_FLAGS[]{
- Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
- StencilProperties, CullMode, DepthBoundsEnable, DepthTestEnable, DepthWriteEnable,
- DepthCompareOp, FrontFace, StencilOp, StencilTestEnable, VertexBuffers,
+ Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
+ StencilProperties, LineWidth, CullMode, DepthBoundsEnable, DepthTestEnable,
+ DepthWriteEnable, DepthCompareOp, FrontFace, StencilOp, StencilTestEnable,
+ VertexBuffers, VertexInput,
};
Flags flags{};
for (const int flag : INVALIDATION_FLAGS) {
@@ -40,6 +41,12 @@ Flags MakeInvalidationFlags() {
for (int index = VertexBuffer0; index <= VertexBuffer31; ++index) {
flags[index] = true;
}
+ for (int index = VertexAttribute0; index <= VertexAttribute31; ++index) {
+ flags[index] = true;
+ }
+ for (int index = VertexBinding0; index <= VertexBinding31; ++index) {
+ flags[index] = true;
+ }
return flags;
}
@@ -79,6 +86,11 @@ void SetupDirtyStencilProperties(Tables& tables) {
table[OFF(stencil_back_func_mask)] = StencilProperties;
}
+void SetupDirtyLineWidth(Tables& tables) {
+ tables[0][OFF(line_width_smooth)] = LineWidth;
+ tables[0][OFF(line_width_aliased)] = LineWidth;
+}
+
void SetupDirtyCullMode(Tables& tables) {
auto& table = tables[0];
table[OFF(cull_face)] = CullMode;
@@ -134,31 +146,38 @@ void SetupDirtyBlending(Tables& tables) {
FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending);
}
-void SetupDirtyInstanceDivisors(Tables& tables) {
- static constexpr size_t divisor_offset = 3;
- for (size_t index = 0; index < Regs::NumVertexArrays; ++index) {
- tables[0][OFF(instanced_arrays) + index] = InstanceDivisors;
- tables[0][OFF(vertex_array) + index * NUM(vertex_array[0]) + divisor_offset] =
- InstanceDivisors;
+void SetupDirtyViewportSwizzles(Tables& tables) {
+ static constexpr size_t swizzle_offset = 6;
+ for (size_t index = 0; index < Regs::NumViewports; ++index) {
+ tables[0][OFF(viewport_transform) + index * NUM(viewport_transform[0]) + swizzle_offset] =
+ ViewportSwizzles;
}
}
void SetupDirtyVertexAttributes(Tables& tables) {
- FillBlock(tables[0], OFF(vertex_attrib_format), NUM(vertex_attrib_format), VertexAttributes);
+ for (size_t i = 0; i < Regs::NumVertexAttributes; ++i) {
+ const size_t offset = OFF(vertex_attrib_format) + i * NUM(vertex_attrib_format[0]);
+ FillBlock(tables[0], offset, NUM(vertex_attrib_format[0]), VertexAttribute0 + i);
+ }
+ FillBlock(tables[1], OFF(vertex_attrib_format), Regs::NumVertexAttributes, VertexInput);
}
-void SetupDirtyViewportSwizzles(Tables& tables) {
- static constexpr size_t swizzle_offset = 6;
- for (size_t index = 0; index < Regs::NumViewports; ++index) {
- tables[0][OFF(viewport_transform) + index * NUM(viewport_transform[0]) + swizzle_offset] =
- ViewportSwizzles;
+void SetupDirtyVertexBindings(Tables& tables) {
+ // Do NOT include the stride here; it's implicit in VertexBuffer
+ static constexpr size_t divisor_offset = 3;
+ for (size_t i = 0; i < Regs::NumVertexArrays; ++i) {
+ const u8 flag = static_cast<u8>(VertexBinding0 + i);
+ tables[0][OFF(instanced_arrays) + i] = VertexInput;
+ tables[1][OFF(instanced_arrays) + i] = flag;
+ tables[0][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = VertexInput;
+ tables[1][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = flag;
}
}
} // Anonymous namespace
StateTracker::StateTracker(Tegra::GPU& gpu)
: flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
- auto& tables = gpu.Maxwell3D().dirty.tables;
+ auto& tables{gpu.Maxwell3D().dirty.tables};
SetupDirtyFlags(tables);
SetupDirtyViewports(tables);
SetupDirtyScissors(tables);
@@ -166,6 +185,7 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyBlendConstants(tables);
SetupDirtyDepthBounds(tables);
SetupDirtyStencilProperties(tables);
+ SetupDirtyLineWidth(tables);
SetupDirtyCullMode(tables);
SetupDirtyDepthBoundsEnable(tables);
SetupDirtyDepthTestEnable(tables);
@@ -175,9 +195,9 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyStencilOp(tables);
SetupDirtyStencilTestEnable(tables);
SetupDirtyBlending(tables);
- SetupDirtyInstanceDivisors(tables);
- SetupDirtyVertexAttributes(tables);
SetupDirtyViewportSwizzles(tables);
+ SetupDirtyVertexAttributes(tables);
+ SetupDirtyVertexBindings(tables);
}
} // namespace Vulkan
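
The Setup* functions fill lookup tables indexed by Maxwell register offset, one flag per table level, which is how a single write to instanced_arrays can dirty both the coarse VertexInput flag (table 0) and the fine-grained VertexBinding0 + i flag (table 1). A minimal sketch of the write path these tables feed, assuming flag 0 is a reserved no-op slot:

#include <array>
#include <bitset>
#include <cstddef>
#include <cstdint>

using Flags = std::bitset<256>;
using Table = std::array<std::uint8_t, 0x4000>; // one entry per register offset
using Tables = std::array<Table, 2>;

void OnRegisterWrite(Flags& flags, const Tables& tables, std::size_t offset) {
    for (const Table& table : tables) {
        // Offsets nobody registered map to flag 0, which no consumer reads.
        flags[table[offset]] = true;
    }
}
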
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index 84e918a71..5f78f6950 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -19,12 +19,19 @@ namespace Dirty {
enum : u8 {
First = VideoCommon::Dirty::LastCommonEntry,
+ VertexInput,
+ VertexAttribute0,
+ VertexAttribute31 = VertexAttribute0 + 31,
+ VertexBinding0,
+ VertexBinding31 = VertexBinding0 + 31,
+
Viewports,
Scissors,
DepthBias,
BlendConstants,
DepthBounds,
StencilProperties,
+ LineWidth,
CullMode,
DepthBoundsEnable,
@@ -36,11 +43,9 @@ enum : u8 {
StencilTestEnable,
Blending,
- InstanceDivisors,
- VertexAttributes,
ViewportSwizzles,
- Last
+ Last,
};
static_assert(Last <= std::numeric_limits<u8>::max());
@@ -89,6 +94,10 @@ public:
return Exchange(Dirty::StencilProperties, false);
}
+ bool TouchLineWidth() const {
+ return Exchange(Dirty::LineWidth, false);
+ }
+
bool TouchCullMode() {
return Exchange(Dirty::CullMode, false);
}
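
Each Touch* accessor is a test-and-clear: it reports whether the flag was dirty and resets it in the same call, so the rasterizer re-records only the dynamic state that actually changed. A sketch of the underlying helper (flags is a reference to the Maxwell3D dirty bitset, which is why the accessors can be const) plus a hypothetical call site:

bool Exchange(std::size_t id, bool new_value) const noexcept {
    const bool is_dirty = flags[id];
    flags[id] = new_value;
    return is_dirty;
}

// Hypothetical rasterizer-side use:
if (state_tracker.TouchLineWidth()) {
    UpdateLineWidth(regs); // records vkCmdSetLineWidth through the scheduler
}
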
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index dfd5c65ba..d990eefba 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -65,6 +65,9 @@ VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKSchedul
VKSwapchain::~VKSwapchain() = default;
void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
+ is_outdated = false;
+ is_suboptimal = false;
+
const auto physical_device = device.GetPhysical();
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
@@ -82,21 +85,31 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
resource_ticks.resize(image_count);
}
-bool VKSwapchain::AcquireNextImage() {
- const VkResult result =
- device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
- *present_semaphores[frame_index], {}, &image_index);
-
+void VKSwapchain::AcquireNextImage() {
+ const VkResult result = device.GetLogical().AcquireNextImageKHR(
+ *swapchain, std::numeric_limits<u64>::max(), *present_semaphores[frame_index],
+ VK_NULL_HANDLE, &image_index);
+ switch (result) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ is_suboptimal = true;
+ break;
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ is_outdated = true;
+ break;
+ default:
+ LOG_ERROR(Render_Vulkan, "vkAcquireNextImageKHR returned {}", vk::ToString(result));
+ break;
+ }
scheduler.Wait(resource_ticks[image_index]);
- return result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR;
+ resource_ticks[image_index] = scheduler.CurrentTick();
}
-bool VKSwapchain::Present(VkSemaphore render_semaphore) {
+void VKSwapchain::Present(VkSemaphore render_semaphore) {
const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
const auto present_queue{device.GetPresentQueue()};
- bool recreated = false;
-
const VkPresentInfoKHR present_info{
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
.pNext = nullptr,
@@ -107,7 +120,6 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
.pImageIndices = &image_index,
.pResults = nullptr,
};
-
switch (const VkResult result = present_queue.Present(present_info)) {
case VK_SUCCESS:
break;
@@ -115,24 +127,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
break;
case VK_ERROR_OUT_OF_DATE_KHR:
- if (current_width > 0 && current_height > 0) {
- Create(current_width, current_height, current_srgb);
- recreated = true;
- }
+ is_outdated = true;
break;
default:
LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
break;
}
-
- resource_ticks[image_index] = scheduler.CurrentTick();
- frame_index = (frame_index + 1) % static_cast<u32>(image_count);
- return recreated;
-}
-
-bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const {
- // TODO(Rodrigo): Handle framebuffer pixel format changes
- return framebuffer.width != current_width || framebuffer.height != current_height;
+ ++frame_index;
+ if (frame_index >= image_count) {
+ frame_index = 0;
+ }
}
void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
@@ -148,7 +152,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
requested_image_count = capabilities.maxImageCount;
}
-
VkSwapchainCreateInfoKHR swapchain_ci{
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = nullptr,
@@ -169,7 +172,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
.clipped = VK_FALSE,
.oldSwapchain = nullptr,
};
-
const u32 graphics_family{device.GetGraphicsFamily()};
const u32 present_family{device.GetPresentFamily()};
const std::array<u32, 2> queue_indices{graphics_family, present_family};
@@ -178,7 +180,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
}
-
// Request the size again to reduce the possibility of a TOCTOU race condition.
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
@@ -186,8 +187,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
extent = swapchain_ci.imageExtent;
- current_width = extent.width;
- current_height = extent.height;
current_srgb = srgb;
images = swapchain.GetImages();
@@ -197,8 +196,8 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
void VKSwapchain::CreateSemaphores() {
present_semaphores.resize(image_count);
- std::generate(present_semaphores.begin(), present_semaphores.end(),
- [this] { return device.GetLogical().CreateSemaphore(); });
+ std::ranges::generate(present_semaphores,
+ [this] { return device.GetLogical().CreateSemaphore(); });
}
void VKSwapchain::CreateImageViews() {
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
index adc8d27cf..35c2cdc14 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.h
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -28,14 +28,25 @@ public:
void Create(u32 width, u32 height, bool srgb);
/// Acquires the next image in the swapchain, waiting as needed.
- bool AcquireNextImage();
+ void AcquireNextImage();
- /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
- /// recreated. Takes responsibility for ownership of the fence.
- bool Present(VkSemaphore render_semaphore);
+ /// Presents the rendered image to the swapchain.
+ void Present(VkSemaphore render_semaphore);
- /// Returns true when the framebuffer layout has changed.
- bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
+ /// Returns true when the color space has changed.
+ bool HasColorSpaceChanged(bool is_srgb) const {
+ return current_srgb != is_srgb;
+ }
+
+ /// Returns true when the swapchain is outdated.
+ bool IsOutDated() const {
+ return is_outdated;
+ }
+
+ /// Returns true when the swapchain is suboptimal.
+ bool IsSubOptimal() const {
+ return is_suboptimal;
+ }
VkExtent2D GetSize() const {
return extent;
@@ -61,10 +72,6 @@ public:
return image_format;
}
- bool GetSrgbState() const {
- return current_srgb;
- }
-
private:
void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
bool srgb);
@@ -92,9 +99,9 @@ private:
VkFormat image_format{};
VkExtent2D extent{};
- u32 current_width{};
- u32 current_height{};
bool current_srgb{};
+ bool is_outdated{};
+ bool is_suboptimal{};
};
} // namespace Vulkan
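
Present() no longer rebuilds the swapchain itself; it only latches is_outdated and leaves the recreation policy to the renderer. A hedged sketch of the per-frame caller loop this API enables (render_window, is_srgb, and render_semaphore are placeholders):

if (swapchain.IsOutDated() || swapchain.IsSubOptimal() ||
    swapchain.HasColorSpaceChanged(is_srgb)) {
    const auto& layout = render_window.GetFramebufferLayout();
    // Create() clears is_outdated/is_suboptimal before rebuilding.
    swapchain.Create(layout.width, layout.height, is_srgb);
}
swapchain.AcquireNextImage();
// ... record and submit rendering work ...
swapchain.Present(render_semaphore);
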
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 88ccf96f5..8e029bcb3 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -15,6 +15,7 @@
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
@@ -34,19 +35,6 @@ using VideoCommon::SubresourceRange;
using VideoCore::Surface::IsPixelFormatASTC;
namespace {
-
-constexpr std::array ATTACHMENT_REFERENCES{
- VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
- VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
-};
-
constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
if (color == std::array<float, 4>{0, 0, 0, 0}) {
return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
@@ -174,25 +162,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
return device.GetLogical().CreateImage(MakeImageCreateInfo(device, info));
}
-[[nodiscard]] vk::Buffer MakeBuffer(const Device& device, const ImageInfo& info) {
- if (info.type != ImageType::Buffer) {
- return vk::Buffer{};
- }
- const size_t bytes_per_block = VideoCore::Surface::BytesPerBlock(info.format);
- return device.GetLogical().CreateBuffer(VkBufferCreateInfo{
- .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .size = info.size.width * bytes_per_block,
- .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
- VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
- VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
- .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
- .queueFamilyIndexCount = 0,
- .pQueueFamilyIndices = nullptr,
- });
-}
-
[[nodiscard]] VkImageAspectFlags ImageAspectMask(PixelFormat format) {
switch (VideoCore::Surface::GetFormatType(format)) {
case VideoCore::Surface::SurfaceType::ColorTexture:
@@ -226,23 +195,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
}
}
-[[nodiscard]] VkAttachmentDescription AttachmentDescription(const Device& device,
- const ImageView* image_view) {
- using MaxwellToVK::SurfaceFormat;
- const PixelFormat pixel_format = image_view->format;
- return VkAttachmentDescription{
- .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
- .format = SurfaceFormat(device, FormatType::Optimal, true, pixel_format).format,
- .samples = image_view->Samples(),
- .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
- .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
- .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
- .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
- .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
- .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
- };
-}
-
[[nodiscard]] VkComponentSwizzle ComponentSwizzle(SwizzleSource swizzle) {
switch (swizzle) {
case SwizzleSource::Zero:
@@ -263,6 +215,30 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
return VK_COMPONENT_SWIZZLE_ZERO;
}
+[[nodiscard]] VkImageViewType ImageViewType(Shader::TextureType type) {
+ switch (type) {
+ case Shader::TextureType::Color1D:
+ return VK_IMAGE_VIEW_TYPE_1D;
+ case Shader::TextureType::Color2D:
+ return VK_IMAGE_VIEW_TYPE_2D;
+ case Shader::TextureType::ColorCube:
+ return VK_IMAGE_VIEW_TYPE_CUBE;
+ case Shader::TextureType::Color3D:
+ return VK_IMAGE_VIEW_TYPE_3D;
+ case Shader::TextureType::ColorArray1D:
+ return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case Shader::TextureType::ColorArray2D:
+ return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case Shader::TextureType::ColorArrayCube:
+ return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ case Shader::TextureType::Buffer:
+ UNREACHABLE_MSG("Texture buffers can't be image views");
+ return VK_IMAGE_VIEW_TYPE_1D;
+ }
+ UNREACHABLE_MSG("Invalid image view type={}", type);
+ return VK_IMAGE_VIEW_TYPE_2D;
+}
+
[[nodiscard]] VkImageViewType ImageViewType(VideoCommon::ImageViewType type) {
switch (type) {
case VideoCommon::ImageViewType::e1D:
@@ -280,7 +256,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
case VideoCommon::ImageViewType::CubeArray:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case VideoCommon::ImageViewType::Rect:
- LOG_WARNING(Render_Vulkan, "Unnormalized image view type not supported");
+ UNIMPLEMENTED_MSG("Rect image view");
return VK_IMAGE_VIEW_TYPE_2D;
case VideoCommon::ImageViewType::Buffer:
UNREACHABLE_MSG("Texture buffers can't be image views");
@@ -327,7 +303,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
};
}
-[[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
+[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
std::vector<VkBufferCopy> result(copies.size());
std::ranges::transform(
@@ -587,6 +563,28 @@ struct RangedBarrierRange {
}
};
+[[nodiscard]] VkFormat Format(Shader::ImageFormat format) {
+ switch (format) {
+ case Shader::ImageFormat::Typeless:
+ break;
+ case Shader::ImageFormat::R8_SINT:
+ return VK_FORMAT_R8_SINT;
+ case Shader::ImageFormat::R8_UINT:
+ return VK_FORMAT_R8_UINT;
+ case Shader::ImageFormat::R16_UINT:
+ return VK_FORMAT_R16_UINT;
+ case Shader::ImageFormat::R16_SINT:
+ return VK_FORMAT_R16_SINT;
+ case Shader::ImageFormat::R32_UINT:
+ return VK_FORMAT_R32_UINT;
+ case Shader::ImageFormat::R32G32_UINT:
+ return VK_FORMAT_R32G32_UINT;
+ case Shader::ImageFormat::R32G32B32A32_UINT:
+ return VK_FORMAT_R32G32B32A32_UINT;
+ }
+ UNREACHABLE_MSG("Invalid image format={}", format);
+ return VK_FORMAT_R32_UINT;
+}
} // Anonymous namespace
void TextureCacheRuntime::Finish() {
@@ -625,7 +623,7 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
return;
}
}
- ASSERT(src.ImageFormat() == dst.ImageFormat());
+ ASSERT(src.format == dst.format);
ASSERT(!(is_dst_msaa && !is_src_msaa));
ASSERT(operation == Fermi2D::Operation::SrcCopy);
@@ -842,13 +840,9 @@ u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_addr_,
VAddr cpu_addr_)
: VideoCommon::ImageBase(info_, gpu_addr_, cpu_addr_), scheduler{&runtime.scheduler},
- image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)),
+ image(MakeImage(runtime.device, info)),
+ commit(runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal)),
aspect_mask(ImageAspectMask(info.format)) {
- if (image) {
- commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal);
- } else {
- commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
- }
if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
if (Settings::values.accelerate_astc.GetValue()) {
flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
@@ -857,11 +851,7 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
}
}
if (runtime.device.HasDebuggingToolAttached()) {
- if (image) {
- image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
- } else {
- buffer.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
- }
+ image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
@@ -913,19 +903,6 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
});
}
-void Image::UploadMemory(const StagingBufferRef& map,
- std::span<const VideoCommon::BufferCopy> copies) {
- // TODO: Move this to another API
- scheduler->RequestOutsideRenderPassOperationContext();
- std::vector vk_copies = TransformBufferCopies(copies, map.offset);
- const VkBuffer src_buffer = map.buffer;
- const VkBuffer dst_buffer = *buffer;
- scheduler->Record([src_buffer, dst_buffer, vk_copies](vk::CommandBuffer cmdbuf) {
- // TODO: Barriers
- cmdbuf.CopyBuffer(src_buffer, dst_buffer, vk_copies);
- });
-}
-
void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask);
scheduler->RequestOutsideRenderPassOperationContext();
@@ -984,8 +961,9 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
ImageId image_id_, Image& image)
: VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
- image_handle{image.Handle()}, image_format{image.info.format}, samples{ConvertSampleCount(
- image.info.num_samples)} {
+ image_handle{image.Handle()}, samples{ConvertSampleCount(image.info.num_samples)} {
+ using Shader::TextureType;
+
const VkImageAspectFlags aspect_mask = ImageViewAspectMask(info);
std::array<SwizzleSource, 4> swizzle{
SwizzleSource::R,
@@ -1023,57 +1001,54 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
},
.subresourceRange = MakeSubresourceRange(aspect_mask, info.range),
};
- const auto create = [&](VideoCommon::ImageViewType view_type, std::optional<u32> num_layers) {
+ const auto create = [&](TextureType tex_type, std::optional<u32> num_layers) {
VkImageViewCreateInfo ci{create_info};
- ci.viewType = ImageViewType(view_type);
+ ci.viewType = ImageViewType(tex_type);
if (num_layers) {
ci.subresourceRange.layerCount = *num_layers;
}
vk::ImageView handle = device->GetLogical().CreateImageView(ci);
if (device->HasDebuggingToolAttached()) {
- handle.SetObjectNameEXT(VideoCommon::Name(*this, view_type).c_str());
+ handle.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
- image_views[static_cast<size_t>(view_type)] = std::move(handle);
+ image_views[static_cast<size_t>(tex_type)] = std::move(handle);
};
switch (info.type) {
case VideoCommon::ImageViewType::e1D:
case VideoCommon::ImageViewType::e1DArray:
- create(VideoCommon::ImageViewType::e1D, 1);
- create(VideoCommon::ImageViewType::e1DArray, std::nullopt);
- render_target = Handle(VideoCommon::ImageViewType::e1DArray);
+ create(TextureType::Color1D, 1);
+ create(TextureType::ColorArray1D, std::nullopt);
+ render_target = Handle(TextureType::ColorArray1D);
break;
case VideoCommon::ImageViewType::e2D:
case VideoCommon::ImageViewType::e2DArray:
- create(VideoCommon::ImageViewType::e2D, 1);
- create(VideoCommon::ImageViewType::e2DArray, std::nullopt);
- render_target = Handle(VideoCommon::ImageViewType::e2DArray);
+ create(TextureType::Color2D, 1);
+ create(TextureType::ColorArray2D, std::nullopt);
+ render_target = Handle(Shader::TextureType::ColorArray2D);
break;
case VideoCommon::ImageViewType::e3D:
- create(VideoCommon::ImageViewType::e3D, std::nullopt);
- render_target = Handle(VideoCommon::ImageViewType::e3D);
+ create(TextureType::Color3D, std::nullopt);
+ render_target = Handle(Shader::TextureType::Color3D);
break;
case VideoCommon::ImageViewType::Cube:
case VideoCommon::ImageViewType::CubeArray:
- create(VideoCommon::ImageViewType::Cube, 6);
- create(VideoCommon::ImageViewType::CubeArray, std::nullopt);
+ create(TextureType::ColorCube, 6);
+ create(TextureType::ColorArrayCube, std::nullopt);
break;
case VideoCommon::ImageViewType::Rect:
UNIMPLEMENTED();
break;
case VideoCommon::ImageViewType::Buffer:
- buffer_view = device->GetLogical().CreateBufferView(VkBufferViewCreateInfo{
- .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .buffer = image.Buffer(),
- .format = format_info.format,
- .offset = 0, // TODO: Redesign buffer cache to support this
- .range = image.guest_size_bytes,
- });
+ UNREACHABLE();
break;
}
}
+ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
+ const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
+ : VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
+ buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}
+
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams& params)
: VideoCommon::ImageViewBase{params} {}
@@ -1081,7 +1056,8 @@ VkImageView ImageView::DepthView() {
if (depth_view) {
return *depth_view;
}
- depth_view = MakeDepthStencilView(VK_IMAGE_ASPECT_DEPTH_BIT);
+ const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
+ depth_view = MakeView(info.format, VK_IMAGE_ASPECT_DEPTH_BIT);
return *depth_view;
}
@@ -1089,18 +1065,38 @@ VkImageView ImageView::StencilView() {
if (stencil_view) {
return *stencil_view;
}
- stencil_view = MakeDepthStencilView(VK_IMAGE_ASPECT_STENCIL_BIT);
+ const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
+ stencil_view = MakeView(info.format, VK_IMAGE_ASPECT_STENCIL_BIT);
return *stencil_view;
}
-vk::ImageView ImageView::MakeDepthStencilView(VkImageAspectFlags aspect_mask) {
+VkImageView ImageView::StorageView(Shader::TextureType texture_type,
+ Shader::ImageFormat image_format) {
+ if (image_format == Shader::ImageFormat::Typeless) {
+ return Handle(texture_type);
+ }
+ const bool is_signed{image_format == Shader::ImageFormat::R8_SINT ||
+ image_format == Shader::ImageFormat::R16_SINT};
+ if (!storage_views) {
+ storage_views = std::make_unique<StorageViews>();
+ }
+ auto& views{is_signed ? storage_views->signeds : storage_views->unsigneds};
+ auto& view{views[static_cast<size_t>(texture_type)]};
+ if (view) {
+ return *view;
+ }
+ view = MakeView(Format(image_format), VK_IMAGE_ASPECT_COLOR_BIT);
+ return *view;
+}
+
+vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask) {
return device->GetLogical().CreateImageView({
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = image_handle,
.viewType = ImageViewType(type),
- .format = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format).format,
+ .format = vk_format,
.components{
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
@@ -1164,7 +1160,6 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM_RT> color_buffers,
ImageView* depth_buffer, const VideoCommon::RenderTargets& key) {
- std::vector<VkAttachmentDescription> descriptions;
std::vector<VkImageView> attachments;
RenderPassKey renderpass_key{};
s32 num_layers = 1;
@@ -1175,7 +1170,6 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
renderpass_key.color_formats[index] = PixelFormat::Invalid;
continue;
}
- descriptions.push_back(AttachmentDescription(runtime.device, color_buffer));
attachments.push_back(color_buffer->RenderTarget());
renderpass_key.color_formats[index] = color_buffer->format;
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1185,10 +1179,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
++num_images;
}
const size_t num_colors = attachments.size();
- const VkAttachmentReference* depth_attachment =
- depth_buffer ? &ATTACHMENT_REFERENCES[num_colors] : nullptr;
if (depth_buffer) {
- descriptions.push_back(AttachmentDescription(runtime.device, depth_buffer));
attachments.push_back(depth_buffer->RenderTarget());
renderpass_key.depth_format = depth_buffer->format;
num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1201,40 +1192,14 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
}
renderpass_key.samples = samples;
- const auto& device = runtime.device.GetLogical();
- const auto [cache_pair, is_new] = runtime.renderpass_cache.try_emplace(renderpass_key);
- if (is_new) {
- const VkSubpassDescription subpass{
- .flags = 0,
- .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
- .inputAttachmentCount = 0,
- .pInputAttachments = nullptr,
- .colorAttachmentCount = static_cast<u32>(num_colors),
- .pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
- .pResolveAttachments = nullptr,
- .pDepthStencilAttachment = depth_attachment,
- .preserveAttachmentCount = 0,
- .pPreserveAttachments = nullptr,
- };
- cache_pair->second = device.CreateRenderPass(VkRenderPassCreateInfo{
- .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
- .pNext = nullptr,
- .flags = 0,
- .attachmentCount = static_cast<u32>(descriptions.size()),
- .pAttachments = descriptions.data(),
- .subpassCount = 1,
- .pSubpasses = &subpass,
- .dependencyCount = 0,
- .pDependencies = nullptr,
- });
- }
- renderpass = *cache_pair->second;
+ renderpass = runtime.render_pass_cache.Get(renderpass_key);
+
render_area = VkExtent2D{
.width = key.size.width,
.height = key.size.height,
};
num_color_buffers = static_cast<u32>(num_colors);
- framebuffer = device.CreateFramebuffer(VkFramebufferCreateInfo{
+ framebuffer = runtime.device.GetLogical().CreateFramebuffer({
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
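
All of the inline attachment/subpass/VkRenderPassCreateInfo plumbing deleted above moves behind RenderPassCache::Get, keyed by color formats, depth format, and sample count so compatible framebuffers share one VkRenderPass. A hedged sketch of what Get amounts to; cache, mutex, and CreateRenderPass(key) are assumed members standing in for the construction code removed here:

VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
    std::scoped_lock lock{mutex};
    const auto [pair, is_new] = cache.try_emplace(key);
    if (is_new) {
        pair->second = CreateRenderPass(key); // builds descriptions + subpass
    }
    return *pair->second;
}
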
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 172bcdf98..0b73d55f8 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,6 +7,7 @@
#include <compare>
#include <span>
+#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
@@ -26,35 +27,10 @@ class Device;
class Image;
class ImageView;
class Framebuffer;
+class RenderPassCache;
class StagingBufferPool;
class VKScheduler;
-struct RenderPassKey {
- constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
-
- std::array<PixelFormat, NUM_RT> color_formats;
- PixelFormat depth_format;
- VkSampleCountFlagBits samples;
-};
-
-} // namespace Vulkan
-
-namespace std {
-template <>
-struct hash<Vulkan::RenderPassKey> {
- [[nodiscard]] constexpr size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
- size_t value = static_cast<size_t>(key.depth_format) << 48;
- value ^= static_cast<size_t>(key.samples) << 52;
- for (size_t i = 0; i < key.color_formats.size(); ++i) {
- value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
- }
- return value;
- }
-};
-} // namespace std
-
-namespace Vulkan {
-
struct TextureCacheRuntime {
const Device& device;
VKScheduler& scheduler;
@@ -62,13 +38,13 @@ struct TextureCacheRuntime {
StagingBufferPool& staging_buffer_pool;
BlitImageHelper& blit_image_helper;
ASTCDecoderPass& astc_decoder_pass;
- std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
+ RenderPassCache& render_pass_cache;
void Finish();
- [[nodiscard]] StagingBufferRef UploadStagingBuffer(size_t size);
+ StagingBufferRef UploadStagingBuffer(size_t size);
- [[nodiscard]] StagingBufferRef DownloadStagingBuffer(size_t size);
+ StagingBufferRef DownloadStagingBuffer(size_t size);
void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
const Region2D& dst_region, const Region2D& src_region,
@@ -79,7 +55,7 @@ struct TextureCacheRuntime {
void ConvertImage(Framebuffer* dst, ImageView& dst_view, ImageView& src_view);
- [[nodiscard]] bool CanAccelerateImageUpload(Image&) const noexcept {
+ bool CanAccelerateImageUpload(Image&) const noexcept {
return false;
}
@@ -117,8 +93,6 @@ public:
void UploadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
- void UploadMemory(const StagingBufferRef& map, std::span<const VideoCommon::BufferCopy> copies);
-
void DownloadMemory(const StagingBufferRef& map,
std::span<const VideoCommon::BufferImageCopy> copies);
@@ -126,10 +100,6 @@ public:
return *image;
}
- [[nodiscard]] VkBuffer Buffer() const noexcept {
- return *buffer;
- }
-
[[nodiscard]] VkImageAspectFlags AspectMask() const noexcept {
return aspect_mask;
}
@@ -146,7 +116,6 @@ public:
private:
VKScheduler* scheduler;
vk::Image image;
- vk::Buffer buffer;
MemoryCommit commit;
vk::ImageView image_view;
std::vector<vk::ImageView> storage_image_views;
@@ -157,18 +126,19 @@ private:
class ImageView : public VideoCommon::ImageViewBase {
public:
explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
+ explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
+ const VideoCommon::ImageViewInfo&, GPUVAddr);
explicit ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams&);
[[nodiscard]] VkImageView DepthView();
[[nodiscard]] VkImageView StencilView();
- [[nodiscard]] VkImageView Handle(VideoCommon::ImageViewType query_type) const noexcept {
- return *image_views[static_cast<size_t>(query_type)];
- }
+ [[nodiscard]] VkImageView StorageView(Shader::TextureType texture_type,
+ Shader::ImageFormat image_format);
- [[nodiscard]] VkBufferView BufferView() const noexcept {
- return *buffer_view;
+ [[nodiscard]] VkImageView Handle(Shader::TextureType texture_type) const noexcept {
+ return *image_views[static_cast<size_t>(texture_type)];
}
[[nodiscard]] VkImage ImageHandle() const noexcept {
@@ -179,26 +149,36 @@ public:
return render_target;
}
- [[nodiscard]] PixelFormat ImageFormat() const noexcept {
- return image_format;
- }
-
[[nodiscard]] VkSampleCountFlagBits Samples() const noexcept {
return samples;
}
+ [[nodiscard]] GPUVAddr GpuAddr() const noexcept {
+ return gpu_addr;
+ }
+
+ [[nodiscard]] u32 BufferSize() const noexcept {
+ return buffer_size;
+ }
+
private:
- [[nodiscard]] vk::ImageView MakeDepthStencilView(VkImageAspectFlags aspect_mask);
+ struct StorageViews {
+ std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> signeds;
+ std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> unsigneds;
+ };
+
+ [[nodiscard]] vk::ImageView MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask);
const Device* device = nullptr;
- std::array<vk::ImageView, VideoCommon::NUM_IMAGE_VIEW_TYPES> image_views;
+ std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> image_views;
+ std::unique_ptr<StorageViews> storage_views;
vk::ImageView depth_view;
vk::ImageView stencil_view;
- vk::BufferView buffer_view;
VkImage image_handle = VK_NULL_HANDLE;
VkImageView render_target = VK_NULL_HANDLE;
- PixelFormat image_format = PixelFormat::Invalid;
VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
+ GPUVAddr gpu_addr = 0;
+ u32 buffer_size = 0;
};
class ImageAlloc : public VideoCommon::ImageAllocBase {};
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index dc45fdcb1..0df3a7fe9 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -15,7 +15,9 @@
namespace Vulkan {
VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_)
- : device{device_}, scheduler{scheduler_} {}
+ : device{device_}, scheduler{scheduler_} {
+ payload_cursor = payload.data();
+}
VKUpdateDescriptorQueue::~VKUpdateDescriptorQueue() = default;
@@ -36,13 +38,4 @@ void VKUpdateDescriptorQueue::Acquire() {
upload_start = payload_cursor;
}
-void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
- VkDescriptorSet set) {
- const void* const data = upload_start;
- const vk::Device* const logical = &device.GetLogical();
- scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
- logical->UpdateDescriptorSet(set, update_template, data);
- });
-}
-
} // namespace Vulkan
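
With Send() removed, pipelines fetch the staged payload through UpdateData() and record the descriptor template update themselves. A hedged sketch of the replacement call site, mirroring the deleted Send() body; set and update_template belong to the hypothetical caller:

const DescriptorUpdateEntry* const data = update_descriptor_queue.UpdateData();
const vk::Device* const logical = &device.GetLogical();
scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
    logical->UpdateDescriptorSet(set, update_template, data);
});
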
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index d35e77c44..d7de4c490 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -39,7 +39,9 @@ public:
void Acquire();
- void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
+ const DescriptorUpdateEntry* UpdateData() const noexcept {
+ return upload_start;
+ }
void AddSampledImage(VkImageView image_view, VkSampler sampler) {
*(payload_cursor++) = VkDescriptorImageInfo{