path: root/src/video_core/renderer_vulkan
author     Rodrigo Locatti <reinuseslisp@airmail.cc>   2021-03-31 00:31:52 +0200
committer  GitHub <noreply@github.com>                 2021-03-31 00:31:52 +0200
commit     5ee669466fcebd2258229ed6bfe6b5e5529e0200 (patch)
tree       6dbf84fb5c2c9656f1d1ef6c46b2527ea1a205ff /src/video_core/renderer_vulkan
parent     Merge pull request #6116 from german77/userArgument (diff)
parent     astc_decoder: Refactor for style and more efficient memory use (diff)
Diffstat (limited to 'src/video_core/renderer_vulkan')
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp      2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp  333
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h      32
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp       5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h         1
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp   49
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h     19
7 files changed, 431 insertions, 10 deletions
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 19aaf034f..f088447e9 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -166,7 +166,7 @@ struct FormatTuple {
{VK_FORMAT_R16G16_SINT, Attachable | Storage}, // R16G16_SINT
{VK_FORMAT_R16G16_SNORM, Attachable | Storage}, // R16G16_SNORM
{VK_FORMAT_UNDEFINED}, // R32G32B32_FLOAT
- {VK_FORMAT_R8G8B8A8_SRGB, Attachable}, // A8B8G8R8_SRGB
+ {VK_FORMAT_A8B8G8R8_SRGB_PACK32, Attachable}, // A8B8G8R8_SRGB
{VK_FORMAT_R8G8_UNORM, Attachable | Storage}, // R8G8_UNORM
{VK_FORMAT_R8G8_SNORM, Attachable | Storage}, // R8G8_SNORM
{VK_FORMAT_R8G8_SINT, Attachable | Storage}, // R8G8_SINT
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 2f9a7b028..e11406e58 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -11,18 +11,39 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
+#include "video_core/host_shaders/astc_decoder_comp_spv.h"
#include "video_core/host_shaders/vulkan_quad_indexed_comp_spv.h"
#include "video_core/host_shaders/vulkan_uint8_comp_spv.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/texture_cache/accelerated_swizzle.h"
+#include "video_core/texture_cache/types.h"
+#include "video_core/textures/astc.h"
+#include "video_core/textures/decoders.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
+
+using Tegra::Texture::SWIZZLE_TABLE;
+using Tegra::Texture::ASTC::EncodingsValues;
+using namespace Tegra::Texture::ASTC;
+
namespace {
+
+constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0;
+constexpr u32 ASTC_BINDING_ENC_BUFFER = 1;
+constexpr u32 ASTC_BINDING_6_TO_8_BUFFER = 2;
+constexpr u32 ASTC_BINDING_7_TO_8_BUFFER = 3;
+constexpr u32 ASTC_BINDING_8_TO_8_BUFFER = 4;
+constexpr u32 ASTC_BINDING_BYTE_TO_16_BUFFER = 5;
+constexpr u32 ASTC_BINDING_SWIZZLE_BUFFER = 6;
+constexpr u32 ASTC_BINDING_OUTPUT_IMAGE = 7;
+
VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
return {
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
@@ -50,6 +71,67 @@ std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBinding
}};
}
+std::array<VkDescriptorSetLayoutBinding, 8> BuildASTCDescriptorSetBindings() {
+ return {{
+ {
+ .binding = ASTC_BINDING_INPUT_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_ENC_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_6_TO_8_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_7_TO_8_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_8_TO_8_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_BYTE_TO_16_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_SWIZZLE_BUFFER,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ {
+ .binding = ASTC_BINDING_OUTPUT_IMAGE,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+ .pImmutableSamplers = nullptr,
+ },
+ }};
+}
+
VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
return {
.dstBinding = 0,
@@ -61,6 +143,94 @@ VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
};
}
+std::array<VkDescriptorUpdateTemplateEntryKHR, 8> BuildASTCPassDescriptorUpdateTemplateEntry() {
+ return {{
+ {
+ .dstBinding = ASTC_BINDING_INPUT_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_INPUT_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_ENC_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_ENC_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_6_TO_8_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_6_TO_8_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_7_TO_8_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_7_TO_8_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_8_TO_8_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_8_TO_8_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_BYTE_TO_16_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_BYTE_TO_16_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_SWIZZLE_BUFFER,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ .offset = ASTC_BINDING_SWIZZLE_BUFFER * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ {
+ .dstBinding = ASTC_BINDING_OUTPUT_IMAGE,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .offset = ASTC_BINDING_OUTPUT_IMAGE * sizeof(DescriptorUpdateEntry),
+ .stride = sizeof(DescriptorUpdateEntry),
+ },
+ }};
+}
+
+struct AstcPushConstants {
+ std::array<u32, 2> blocks_dims;
+ u32 bytes_per_block_log2;
+ u32 layer_stride;
+ u32 block_size;
+ u32 x_shift;
+ u32 block_height;
+ u32 block_height_mask;
+};
+
+struct AstcBufferData {
+ decltype(SWIZZLE_TABLE) swizzle_table_buffer = SWIZZLE_TABLE;
+ decltype(EncodingsValues) encoding_values = EncodingsValues;
+ decltype(REPLICATE_6_BIT_TO_8_TABLE) replicate_6_to_8 = REPLICATE_6_BIT_TO_8_TABLE;
+ decltype(REPLICATE_7_BIT_TO_8_TABLE) replicate_7_to_8 = REPLICATE_7_BIT_TO_8_TABLE;
+ decltype(REPLICATE_8_BIT_TO_8_TABLE) replicate_8_to_8 = REPLICATE_8_BIT_TO_8_TABLE;
+ decltype(REPLICATE_BYTE_TO_16_TABLE) replicate_byte_to_16 = REPLICATE_BYTE_TO_16_TABLE;
+} constexpr ASTC_BUFFER_DATA;
+
} // Anonymous namespace
VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
@@ -238,4 +408,167 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
return {staging.buffer, staging.offset};
}
+ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+ VKDescriptorPool& descriptor_pool_,
+ StagingBufferPool& staging_buffer_pool_,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ MemoryAllocator& memory_allocator_)
+ : VKComputePass(device_, descriptor_pool_, BuildASTCDescriptorSetBindings(),
+ BuildASTCPassDescriptorUpdateTemplateEntry(),
+ BuildComputePushConstantRange(sizeof(AstcPushConstants)),
+ ASTC_DECODER_COMP_SPV),
+ device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
+ update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
+
+ASTCDecoderPass::~ASTCDecoderPass() = default;
+
+void ASTCDecoderPass::MakeDataBuffer() {
+ constexpr size_t TOTAL_BUFFER_SIZE = sizeof(ASTC_BUFFER_DATA) + sizeof(SWIZZLE_TABLE);
+ data_buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .pNext = nullptr,
+ .flags = 0,
+ .size = TOTAL_BUFFER_SIZE,
+ .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .queueFamilyIndexCount = 0,
+ .pQueueFamilyIndices = nullptr,
+ });
+ data_buffer_commit = memory_allocator.Commit(data_buffer, MemoryUsage::Upload);
+
+ const auto staging_ref = staging_buffer_pool.Request(TOTAL_BUFFER_SIZE, MemoryUsage::Upload);
+ std::memcpy(staging_ref.mapped_span.data(), &ASTC_BUFFER_DATA, sizeof(ASTC_BUFFER_DATA));
+ // Tack on the swizzle table at the end of the buffer
+ std::memcpy(staging_ref.mapped_span.data() + sizeof(ASTC_BUFFER_DATA), &SWIZZLE_TABLE,
+ sizeof(SWIZZLE_TABLE));
+
+ scheduler.Record([src = staging_ref.buffer, offset = staging_ref.offset, dst = *data_buffer,
+ TOTAL_BUFFER_SIZE](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBuffer(src, dst,
+ VkBufferCopy{
+ .srcOffset = offset,
+ .dstOffset = 0,
+ .size = TOTAL_BUFFER_SIZE,
+ });
+ cmdbuf.PipelineBarrier(
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0,
+ VkMemoryBarrier{
+ .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+ .pNext = nullptr,
+ .srcAccessMask = 0,
+ .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+ });
+ });
+}
+
+void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
+ std::span<const VideoCommon::SwizzleParameters> swizzles) {
+ using namespace VideoCommon::Accelerated;
+ const std::array<u32, 2> block_dims{
+ VideoCore::Surface::DefaultBlockWidth(image.info.format),
+ VideoCore::Surface::DefaultBlockHeight(image.info.format),
+ };
+ scheduler.RequestOutsideRenderPassOperationContext();
+ if (!data_buffer) {
+ MakeDataBuffer();
+ }
+ const VkPipeline vk_pipeline = *pipeline;
+ const VkImageAspectFlags aspect_mask = image.AspectMask();
+ const VkImage vk_image = image.Handle();
+ const bool is_initialized = image.ExchangeInitialization();
+ scheduler.Record(
+ [vk_pipeline, vk_image, aspect_mask, is_initialized](vk::CommandBuffer cmdbuf) {
+ const VkImageMemoryBarrier image_barrier{
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = nullptr,
+ .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+ .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
+ .oldLayout = is_initialized ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_UNDEFINED,
+ .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = vk_image,
+ .subresourceRange{
+ .aspectMask = aspect_mask,
+ .baseMipLevel = 0,
+ .levelCount = VK_REMAINING_MIP_LEVELS,
+ .baseArrayLayer = 0,
+ .layerCount = VK_REMAINING_ARRAY_LAYERS,
+ },
+ };
+ cmdbuf.PipelineBarrier(is_initialized ? VK_PIPELINE_STAGE_ALL_COMMANDS_BIT : 0,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, image_barrier);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, vk_pipeline);
+ });
+ for (const VideoCommon::SwizzleParameters& swizzle : swizzles) {
+ const size_t input_offset = swizzle.buffer_offset + map.offset;
+ const u32 num_dispatches_x = Common::DivCeil(swizzle.num_tiles.width, 32U);
+ const u32 num_dispatches_y = Common::DivCeil(swizzle.num_tiles.height, 32U);
+ const u32 num_dispatches_z = image.info.resources.layers;
+
+ update_descriptor_queue.Acquire();
+ update_descriptor_queue.AddBuffer(map.buffer, input_offset,
+ image.guest_size_bytes - swizzle.buffer_offset);
+ update_descriptor_queue.AddBuffer(*data_buffer, offsetof(AstcBufferData, encoding_values),
+ sizeof(AstcBufferData::encoding_values));
+ update_descriptor_queue.AddBuffer(*data_buffer, offsetof(AstcBufferData, replicate_6_to_8),
+ sizeof(AstcBufferData::replicate_6_to_8));
+ update_descriptor_queue.AddBuffer(*data_buffer, offsetof(AstcBufferData, replicate_7_to_8),
+ sizeof(AstcBufferData::replicate_7_to_8));
+ update_descriptor_queue.AddBuffer(*data_buffer, offsetof(AstcBufferData, replicate_8_to_8),
+ sizeof(AstcBufferData::replicate_8_to_8));
+ update_descriptor_queue.AddBuffer(*data_buffer,
+ offsetof(AstcBufferData, replicate_byte_to_16),
+ sizeof(AstcBufferData::replicate_byte_to_16));
+ update_descriptor_queue.AddBuffer(*data_buffer, sizeof(AstcBufferData),
+ sizeof(SWIZZLE_TABLE));
+ update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
+
+ const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
+ const VkPipelineLayout vk_layout = *layout;
+
+ // To unswizzle the ASTC data
+ const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
+ ASSERT(params.origin == (std::array<u32, 3>{0, 0, 0}));
+ ASSERT(params.destination == (std::array<s32, 3>{0, 0, 0}));
+ scheduler.Record([vk_layout, num_dispatches_x, num_dispatches_y, num_dispatches_z,
+ block_dims, params, set](vk::CommandBuffer cmdbuf) {
+ const AstcPushConstants uniforms{
+ .blocks_dims = block_dims,
+ .bytes_per_block_log2 = params.bytes_per_block_log2,
+ .layer_stride = params.layer_stride,
+ .block_size = params.block_size,
+ .x_shift = params.x_shift,
+ .block_height = params.block_height,
+ .block_height_mask = params.block_height_mask,
+ };
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, vk_layout, 0, set, {});
+ cmdbuf.PushConstants(vk_layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
+ cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, num_dispatches_z);
+ });
+ }
+ scheduler.Record([vk_image, aspect_mask](vk::CommandBuffer cmdbuf) {
+ const VkImageMemoryBarrier image_barrier{
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .pNext = nullptr,
+ .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+ .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
+ .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = vk_image,
+ .subresourceRange{
+ .aspectMask = aspect_mask,
+ .baseMipLevel = 0,
+ .levelCount = VK_REMAINING_MIP_LEVELS,
+ .baseArrayLayer = 0,
+ .layerCount = VK_REMAINING_ARRAY_LAYERS,
+ },
+ };
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, image_barrier);
+ });
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 17d781d99..5ea187c30 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -11,14 +11,21 @@
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
+namespace VideoCommon {
+struct SwizzleParameters;
+}
+
namespace Vulkan {
class Device;
class StagingBufferPool;
class VKScheduler;
class VKUpdateDescriptorQueue;
+class Image;
+struct StagingBufferRef;
class VKComputePass {
public:
@@ -77,4 +84,29 @@ private:
VKUpdateDescriptorQueue& update_descriptor_queue;
};
+class ASTCDecoderPass final : public VKComputePass {
+public:
+ explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
+ VKDescriptorPool& descriptor_pool_,
+ StagingBufferPool& staging_buffer_pool_,
+ VKUpdateDescriptorQueue& update_descriptor_queue_,
+ MemoryAllocator& memory_allocator_);
+ ~ASTCDecoderPass();
+
+ void Assemble(Image& image, const StagingBufferRef& map,
+ std::span<const VideoCommon::SwizzleParameters> swizzles);
+
+private:
+ void MakeDataBuffer();
+
+ const Device& device;
+ VKScheduler& scheduler;
+ StagingBufferPool& staging_buffer_pool;
+ VKUpdateDescriptorQueue& update_descriptor_queue;
+ MemoryAllocator& memory_allocator;
+
+ vk::Buffer data_buffer;
+ MemoryCommit data_buffer_commit;
+};
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index dfd38f575..df5b7b172 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -241,7 +241,10 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
update_descriptor_queue(device, scheduler),
blit_image(device, scheduler, state_tracker, descriptor_pool),
- texture_cache_runtime{device, scheduler, memory_allocator, staging_pool, blit_image},
+ astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
+ memory_allocator),
+ texture_cache_runtime{device, scheduler, memory_allocator,
+ staging_pool, blit_image, astc_decoder_pass},
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
update_descriptor_queue, descriptor_pool),
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index acea1ba2d..235afc6f3 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -173,6 +173,7 @@ private:
VKDescriptorPool descriptor_pool;
VKUpdateDescriptorQueue update_descriptor_queue;
BlitImageHelper blit_image;
+ ASTCDecoderPass astc_decoder_pass;
GraphicsPipelineCacheKey graphics_key;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 22a1014a9..18155e449 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,6 +10,7 @@
#include "video_core/engines/fermi_2d.h"
#include "video_core/renderer_vulkan/blit_image.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@@ -807,7 +808,7 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
}
if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
- flags |= VideoCommon::ImageFlagBits::Converted;
+ flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
}
if (runtime.device.HasDebuggingToolAttached()) {
if (image) {
@@ -816,6 +817,38 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
buffer.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
}
}
+ static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+ .pNext = nullptr,
+ .usage = VK_IMAGE_USAGE_STORAGE_BIT,
+ };
+ if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
+ const auto& device = runtime.device.GetLogical();
+ storage_image_views.reserve(info.resources.levels);
+ for (s32 level = 0; level < info.resources.levels; ++level) {
+ storage_image_views.push_back(device.CreateImageView(VkImageViewCreateInfo{
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .pNext = &storage_image_view_usage_create_info,
+ .flags = 0,
+ .image = *image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY,
+ .format = VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+ .components{
+ .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+ .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+ },
+ .subresourceRange{
+ .aspectMask = aspect_mask,
+ .baseMipLevel = static_cast<u32>(level),
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = VK_REMAINING_ARRAY_LAYERS,
+ },
+ }));
+ }
+ }
}
void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
@@ -918,7 +951,6 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
}
}
const auto format_info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
- const VkFormat vk_format = format_info.format;
const VkImageViewUsageCreateInfo image_view_usage{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
.pNext = nullptr,
@@ -930,7 +962,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
.flags = 0,
.image = image.Handle(),
.viewType = VkImageViewType{},
- .format = vk_format,
+ .format = format_info.format,
.components{
.r = ComponentSwizzle(swizzle[0]),
.g = ComponentSwizzle(swizzle[1]),
@@ -982,7 +1014,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
.pNext = nullptr,
.flags = 0,
.buffer = image.Buffer(),
- .format = vk_format,
+ .format = format_info.format,
.offset = 0, // TODO: Redesign buffer cache to support this
.range = image.guest_size_bytes,
});
@@ -1167,4 +1199,13 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
}
}
+void TextureCacheRuntime::AccelerateImageUpload(
+ Image& image, const StagingBufferRef& map,
+ std::span<const VideoCommon::SwizzleParameters> swizzles) {
+ if (IsPixelFormatASTC(image.info.format)) {
+ return astc_decoder_pass.Assemble(image, map, swizzles);
+ }
+ UNREACHABLE();
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 3aee27ce0..628785d5e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -20,6 +20,7 @@ using VideoCommon::Offset2D;
using VideoCommon::RenderTargets;
using VideoCore::Surface::PixelFormat;
+class ASTCDecoderPass;
class BlitImageHelper;
class Device;
class Image;
@@ -60,6 +61,7 @@ struct TextureCacheRuntime {
MemoryAllocator& memory_allocator;
StagingBufferPool& staging_buffer_pool;
BlitImageHelper& blit_image_helper;
+ ASTCDecoderPass& astc_decoder_pass;
std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
void Finish();
@@ -83,9 +85,7 @@ struct TextureCacheRuntime {
}
void AccelerateImageUpload(Image&, const StagingBufferRef&,
- std::span<const VideoCommon::SwizzleParameters>) {
- UNREACHABLE();
- }
+ std::span<const VideoCommon::SwizzleParameters>);
void InsertUploadMemoryBarrier() {}
@@ -121,15 +121,26 @@ public:
return *buffer;
}
- [[nodiscard]] VkImageCreateFlags AspectMask() const noexcept {
+ [[nodiscard]] VkImageAspectFlags AspectMask() const noexcept {
return aspect_mask;
}
+ [[nodiscard]] VkImageView StorageImageView(s32 level) const noexcept {
+ return *storage_image_views[level];
+ }
+
+ /// Returns true when the image is already initialized and marks it as initialized
+ [[nodiscard]] bool ExchangeInitialization() noexcept {
+ return std::exchange(initialized, true);
+ }
+
private:
VKScheduler* scheduler;
vk::Image image;
vk::Buffer buffer;
MemoryCommit commit;
+ vk::ImageView image_view;
+ std::vector<vk::ImageView> storage_image_views;
VkImageAspectFlags aspect_mask = 0;
bool initialized = false;
};