path: root/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <mutex>
#include <span>
#include <utility>

#include <boost/container/static_vector.hpp>

#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

ComputePipeline::ComputePipeline(const Device& device, VKDescriptorPool& descriptor_pool,
                                 VKUpdateDescriptorQueue& update_descriptor_queue_,
                                 Common::ThreadWorker* thread_worker, const Shader::Info& info_,
                                 vk::ShaderModule spv_module_)
    : update_descriptor_queue{update_descriptor_queue_}, info{info_},
      spv_module(std::move(spv_module_)) {
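    // Describe the shader's resources in a single descriptor set and derive the set layout,
    // pipeline layout, and descriptor update template from it.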
    DescriptorLayoutBuilder builder{device.GetLogical()};
    builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);

    descriptor_set_layout = builder.CreateDescriptorSetLayout();
    pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
    descriptor_update_template = builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout);
    descriptor_allocator = DescriptorAllocator(descriptor_pool, *descriptor_set_layout);

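    // Building the VkPipeline is the expensive part, so it can run asynchronously on a worker
    // thread; Configure() blocks on build_condvar if the pipeline is used before it is ready.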
    auto func{[this, &device] {
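        // Request the guest warp size so subgroup operations behave like they do on the guest
        // GPU; only chained into pNext when VK_EXT_subgroup_size_control is supported.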
        const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
            .pNext = nullptr,
            .requiredSubgroupSize = GuestWarpSize,
        };
        pipeline = device.GetLogical().CreateComputePipeline({
            .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .stage{
                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
                .pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
                .flags = 0,
                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
                .module = *spv_module,
                .pName = "main",
                .pSpecializationInfo = nullptr,
            },
            .layout = *pipeline_layout,
            .basePipelineHandle = VK_NULL_HANDLE,
            .basePipelineIndex = 0,
        });
        std::lock_guard lock{build_mutex};
        is_built = true;
        build_condvar.notify_one();
    }};
    if (thread_worker) {
        thread_worker->QueueWork(std::move(func));
    } else {
        func();
    }
}

void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
                                Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
                                BufferCache& buffer_cache, TextureCache& texture_cache) {
    update_descriptor_queue.Acquire();

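    // Mark the uniform buffers the shader reads and rebind its storage buffers. Each storage
    // buffer descriptor resolves to an address stored in a constant buffer.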
    buffer_cache.SetEnabledComputeUniformBuffers(info.constant_buffer_mask);
    buffer_cache.UnbindComputeStorageBuffers();
    size_t ssbo_index{};
    for (const auto& desc : info.storage_buffers_descriptors) {
        ASSERT(desc.count == 1);
        buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
                                              desc.is_written);
        ++ssbo_index;
    }

    texture_cache.SynchronizeComputeDescriptors();

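    // Stage image view indices and samplers for every texture buffer, texture, and image
    // descriptor declared by the shader.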
    static constexpr size_t max_elements = 64;
    std::array<ImageId, max_elements> image_view_ids;
    boost::container::static_vector<u32, max_elements> image_view_indices;
    boost::container::static_vector<VkSampler, max_elements> samplers;

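    // Texture handles are read from the guest's constant buffers; via_header_index (linked TSC)
    // changes how a raw handle is decoded into image and sampler indices.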
    const auto& launch_desc{kepler_compute.launch_description};
    const auto& cbufs{launch_desc.const_buffer_config};
    const bool via_header_index{launch_desc.linked_tsc};
    const auto read_handle{[&](u32 cbuf_index, u32 cbuf_offset) {
        ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
        const GPUVAddr addr{cbufs[cbuf_index].Address() + cbuf_offset};
        const u32 raw_handle{gpu_memory.Read<u32>(addr)};
        return TextureHandle(raw_handle, via_header_index);
    }};
    for (const auto& desc : info.texture_buffer_descriptors) {
        const TextureHandle handle{read_handle(desc.cbuf_index, desc.cbuf_offset)};
        image_view_indices.push_back(handle.image);
    }
    for (const auto& desc : info.texture_descriptors) {
        const TextureHandle handle{read_handle(desc.cbuf_index, desc.cbuf_offset)};
        image_view_indices.push_back(handle.image);

        Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
        samplers.push_back(sampler->Handle());
    }
    for (const auto& desc : info.image_descriptors) {
        const TextureHandle handle{read_handle(desc.cbuf_index, desc.cbuf_offset)};
        image_view_indices.push_back(handle.image);
    }
    const std::span indices_span(image_view_indices.data(), image_view_indices.size());
    texture_cache.FillComputeImageViews(indices_span, image_view_ids);

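    // Texture buffers go through the buffer cache: bind each one using the GPU address, size,
    // and format of its resolved image view.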
    buffer_cache.UnbindComputeTextureBuffers();
    ImageId* texture_buffer_ids{image_view_ids.data()};
    size_t index{};
    for (const auto& desc : info.texture_buffer_descriptors) {
        ASSERT(desc.count == 1);
        ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
        buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(), image_view.BufferSize(),
                                              image_view.format);
        ++texture_buffer_ids;
        ++index;
    }
    buffer_cache.UpdateComputeBuffers();
    buffer_cache.BindHostComputeBuffers();

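    // Queue the image and sampler descriptor writes to be consumed by the update template.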
    const VkSampler* samplers_it{samplers.data()};
    const ImageId* views_it{image_view_ids.data()};
    PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);

    if (!is_built.load(std::memory_order::relaxed)) {
        // Wait for the pipeline to be built
        scheduler.Record([this](vk::CommandBuffer) {
            std::unique_lock lock{build_mutex};
            build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
        });
    }
    scheduler.Record([this](vk::CommandBuffer cmdbuf) {
        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
    });
    if (!descriptor_set_layout) {
        return;
    }
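    // Commit a descriptor set, write the queued descriptors through the update template, and
    // bind the set for the upcoming dispatch.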
    const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
    update_descriptor_queue.Send(descriptor_update_template.address(), descriptor_set);
    scheduler.Record([this, descriptor_set](vk::CommandBuffer cmdbuf) {
        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
                                  descriptor_set, nullptr);
    });
}

} // namespace Vulkan