path: root/src/video_core
Diffstat (limited to '')
-rw-r--r--  src/video_core/cdma_pusher.cpp                            |  63
-rw-r--r--  src/video_core/cdma_pusher.h                              |  33
-rw-r--r--  src/video_core/command_classes/codecs/codec.cpp           |   7
-rw-r--r--  src/video_core/command_classes/nvdec.cpp                  |   8
-rw-r--r--  src/video_core/command_classes/nvdec.h                    |   2
-rw-r--r--  src/video_core/command_classes/vic.cpp                    |  45
-rw-r--r--  src/video_core/command_classes/vic.h                      |  51
-rw-r--r--  src/video_core/gpu.cpp                                    |   6
-rw-r--r--  src/video_core/gpu_thread.cpp                             |   3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp        |   4
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp   |  83
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h     |  18
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp        |   4
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h          |   6
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp   |   3
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp          |  12
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h            |   3
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp       |  34
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h         |   5
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp   |  10
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h     |  28
21 files changed, 202 insertions, 226 deletions
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 33b3c060b..a3fda1094 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -37,59 +37,43 @@ CDmaPusher::CDmaPusher(GPU& gpu_)
CDmaPusher::~CDmaPusher() = default;
-void CDmaPusher::Push(ChCommandHeaderList&& entries) {
- cdma_queue.push(std::move(entries));
-}
-
-void CDmaPusher::DispatchCalls() {
- while (!cdma_queue.empty()) {
- Step();
- }
-}
-
-void CDmaPusher::Step() {
- const auto entries{cdma_queue.front()};
- cdma_queue.pop();
-
- std::vector<u32> values(entries.size());
- std::memcpy(values.data(), entries.data(), entries.size() * sizeof(u32));
-
- for (const u32 value : values) {
+void CDmaPusher::ProcessEntries(ChCommandHeaderList&& entries) {
+ for (const auto& value : entries) {
if (mask != 0) {
const auto lbs = static_cast<u32>(std::countr_zero(mask));
mask &= ~(1U << lbs);
- ExecuteCommand(static_cast<u32>(offset + lbs), value);
+ ExecuteCommand(offset + lbs, value.raw);
continue;
} else if (count != 0) {
--count;
- ExecuteCommand(static_cast<u32>(offset), value);
+ ExecuteCommand(offset, value.raw);
if (incrementing) {
++offset;
}
continue;
}
- const auto mode = static_cast<ChSubmissionMode>((value >> 28) & 0xf);
+ const auto mode = value.submission_mode.Value();
switch (mode) {
case ChSubmissionMode::SetClass: {
- mask = value & 0x3f;
- offset = (value >> 16) & 0xfff;
- current_class = static_cast<ChClassId>((value >> 6) & 0x3ff);
+ mask = value.value & 0x3f;
+ offset = value.method_offset;
+ current_class = static_cast<ChClassId>((value.value >> 6) & 0x3ff);
break;
}
case ChSubmissionMode::Incrementing:
case ChSubmissionMode::NonIncrementing:
- count = value & 0xffff;
- offset = (value >> 16) & 0xfff;
+ count = value.value;
+ offset = value.method_offset;
incrementing = mode == ChSubmissionMode::Incrementing;
break;
case ChSubmissionMode::Mask:
- mask = value & 0xffff;
- offset = (value >> 16) & 0xfff;
+ mask = value.value;
+ offset = value.method_offset;
break;
case ChSubmissionMode::Immediate: {
- const u32 data = value & 0xfff;
- offset = (value >> 16) & 0xfff;
- ExecuteCommand(static_cast<u32>(offset), data);
+ const u32 data = value.value & 0xfff;
+ offset = value.method_offset;
+ ExecuteCommand(offset, data);
break;
}
default:
@@ -102,8 +86,8 @@ void CDmaPusher::Step() {
void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
switch (current_class) {
case ChClassId::NvDec:
- ThiStateWrite(nvdec_thi_state, state_offset, {data});
- switch (static_cast<ThiMethod>(state_offset)) {
+ ThiStateWrite(nvdec_thi_state, offset, data);
+ switch (static_cast<ThiMethod>(offset)) {
case ThiMethod::IncSyncpt: {
LOG_DEBUG(Service_NVDRV, "NVDEC Class IncSyncpt Method");
const auto syncpoint_id = static_cast<u32>(data & 0xFF);
@@ -120,7 +104,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
LOG_DEBUG(Service_NVDRV, "NVDEC method 0x{:X}",
static_cast<u32>(nvdec_thi_state.method_0));
nvdec_processor->ProcessMethod(static_cast<Nvdec::Method>(nvdec_thi_state.method_0),
- {data});
+ data);
break;
default:
break;
@@ -144,7 +128,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
case ThiMethod::SetMethod1:
LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
static_cast<u32>(vic_thi_state.method_0), data);
- vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), {data});
+ vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
break;
default:
break;
@@ -153,7 +137,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
case ChClassId::Host1x:
// This device is mainly for syncpoint synchronization
LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
- host1x_processor->ProcessMethod(static_cast<Host1x::Method>(state_offset), {data});
+ host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
break;
default:
UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
@@ -161,10 +145,9 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
}
}
-void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset,
- const std::vector<u32>& arguments) {
- u8* const state_offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
- std::memcpy(state_offset_ptr, arguments.data(), sizeof(u32) * arguments.size());
+void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset, u32 argument) {
+ u8* const offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
+ std::memcpy(offset_ptr, &argument, sizeof(u32));
}
} // namespace Tegra
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index e5f212c1a..1bada44dd 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -5,9 +5,7 @@
#pragma once
#include <memory>
-#include <unordered_map>
#include <vector>
-#include <queue>
#include "common/bit_field.h"
#include "common/common_types.h"
@@ -16,9 +14,9 @@
namespace Tegra {
class GPU;
+class Host1x;
class Nvdec;
class Vic;
-class Host1x;
enum class ChSubmissionMode : u32 {
SetClass = 0,
@@ -48,16 +46,10 @@ enum class ChClassId : u32 {
NvDec = 0xf0
};
-enum class ChMethod : u32 {
- Empty = 0,
- SetMethod = 0x10,
- SetData = 0x11,
-};
-
union ChCommandHeader {
u32 raw;
BitField<0, 16, u32> value;
- BitField<16, 12, ChMethod> method_offset;
+ BitField<16, 12, u32> method_offset;
BitField<28, 4, ChSubmissionMode> submission_mode;
};
static_assert(sizeof(ChCommandHeader) == sizeof(u32), "ChCommand header is an invalid size");
@@ -99,21 +91,15 @@ public:
explicit CDmaPusher(GPU& gpu_);
~CDmaPusher();
- /// Push NVDEC command buffer entries into queue
- void Push(ChCommandHeaderList&& entries);
-
- /// Process queued command buffer entries
- void DispatchCalls();
-
- /// Process one queue element
- void Step();
+ /// Process the command entry
+ void ProcessEntries(ChCommandHeaderList&& entries);
+private:
/// Invoke command class devices to execute the command based on the current state
void ExecuteCommand(u32 state_offset, u32 data);
-private:
/// Write arguments value to the ThiRegisters member at the specified offset
- void ThiStateWrite(ThiRegisters& state, u32 state_offset, const std::vector<u32>& arguments);
+ void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
GPU& gpu;
std::shared_ptr<Tegra::Nvdec> nvdec_processor;
@@ -124,13 +110,10 @@ private:
ThiRegisters vic_thi_state{};
ThiRegisters nvdec_thi_state{};
- s32 count{};
- s32 offset{};
+ u32 count{};
+ u32 offset{};
u32 mask{};
bool incrementing{};
-
- // Queue of command lists to be processed
- std::queue<ChCommandHeaderList> cdma_queue;
};
} // namespace Tegra
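
With the intermediate queue removed, ProcessEntries above decodes each ChCommandHeader directly through the BitField union instead of open-coded shifts and masks. The following is a minimal, self-contained sketch of that bit layout; plain shifts stand in for yuzu's BitField<> template, and the header value in main() is illustrative only:

#include <cstdint>
#include <cstdio>

namespace sketch {
using u32 = std::uint32_t;

enum class ChSubmissionMode : u32 {
    SetClass = 0,
    Incrementing = 1,
    NonIncrementing = 2,
    Mask = 3,
    Immediate = 4, // remaining modes omitted
};

struct ChCommandHeader {
    u32 raw;
    u32 Value() const { return raw & 0xffff; }               // BitField<0, 16, u32> value
    u32 MethodOffset() const { return (raw >> 16) & 0xfff; } // BitField<16, 12, u32> method_offset
    ChSubmissionMode Mode() const {                          // BitField<28, 4, ChSubmissionMode>
        return static_cast<ChSubmissionMode>((raw >> 28) & 0xf);
    }
};
} // namespace sketch

int main() {
    // 0x41230ABC decodes as: Immediate submission, method offset 0x123, immediate data 0xABC
    const sketch::ChCommandHeader header{0x41230ABC};
    std::printf("mode=%u offset=0x%X data=0x%X\n", static_cast<unsigned>(header.Mode()),
                static_cast<unsigned>(header.MethodOffset()),
                static_cast<unsigned>(header.Value() & 0xfff));
}
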
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/command_classes/codecs/codec.cpp
index 39bc923a5..d02dc6260 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/command_classes/codecs/codec.cpp
@@ -44,8 +44,10 @@ Codec::~Codec() {
}
void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
- LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", codec);
- current_codec = codec;
+ if (current_codec != codec) {
+ LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", static_cast<u32>(codec));
+ current_codec = codec;
+ }
}
void Codec::StateWrite(u32 offset, u64 arguments) {
@@ -55,7 +57,6 @@ void Codec::StateWrite(u32 offset, u64 arguments) {
void Codec::Decode() {
bool is_first_frame = false;
-
if (!initialized) {
if (current_codec == NvdecCommon::VideoCodec::H264) {
av_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/command_classes/nvdec.cpp
index 79e1f4e13..e4f919afd 100644
--- a/src/video_core/command_classes/nvdec.cpp
+++ b/src/video_core/command_classes/nvdec.cpp
@@ -12,16 +12,16 @@ Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), codec(std::make_unique<Codec>(gpu)) {}
Nvdec::~Nvdec() = default;
-void Nvdec::ProcessMethod(Method method, const std::vector<u32>& arguments) {
+void Nvdec::ProcessMethod(Method method, u32 argument) {
if (method == Method::SetVideoCodec) {
- codec->StateWrite(static_cast<u32>(method), arguments[0]);
+ codec->StateWrite(static_cast<u32>(method), argument);
} else {
- codec->StateWrite(static_cast<u32>(method), static_cast<u64>(arguments[0]) << 8);
+ codec->StateWrite(static_cast<u32>(method), static_cast<u64>(argument) << 8);
}
switch (method) {
case Method::SetVideoCodec:
- codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(arguments[0]));
+ codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
break;
case Method::Execute:
Execute();
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/command_classes/nvdec.h
index e4877c533..e66be80b8 100644
--- a/src/video_core/command_classes/nvdec.h
+++ b/src/video_core/command_classes/nvdec.h
@@ -23,7 +23,7 @@ public:
~Nvdec();
/// Writes the method into the state, Invoke Execute() if encountered
- void ProcessMethod(Method method, const std::vector<u32>& arguments);
+ void ProcessMethod(Method method, u32 argument);
/// Return most recently decoded frame
[[nodiscard]] AVFramePtr GetFrame();
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/command_classes/vic.cpp
index 2b7569335..0a8b82f2b 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/command_classes/vic.cpp
@@ -18,18 +18,14 @@ extern "C" {
namespace Tegra {
Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
- : gpu(gpu_), nvdec_processor(std::move(nvdec_processor_)) {}
-Vic::~Vic() = default;
+ : gpu(gpu_),
+ nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
-void Vic::VicStateWrite(u32 offset, u32 arguments) {
- u8* const state_offset = reinterpret_cast<u8*>(&vic_state) + offset * sizeof(u32);
- std::memcpy(state_offset, &arguments, sizeof(u32));
-}
+Vic::~Vic() = default;
-void Vic::ProcessMethod(Method method, const std::vector<u32>& arguments) {
- LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", method);
- VicStateWrite(static_cast<u32>(method), arguments[0]);
- const u64 arg = static_cast<u64>(arguments[0]) << 8;
+void Vic::ProcessMethod(Method method, u32 argument) {
+ LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
+ const u64 arg = static_cast<u64>(argument) << 8;
switch (method) {
case Method::Execute:
Execute();
@@ -53,8 +49,7 @@ void Vic::ProcessMethod(Method method, const std::vector<u32>& arguments) {
void Vic::Execute() {
if (output_surface_luma_address == 0) {
- LOG_ERROR(Service_NVDRV, "VIC Luma address not set. Received 0x{:X}",
- vic_state.output_surface.luma_offset);
+ LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
return;
}
const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
@@ -89,8 +84,10 @@ void Vic::Execute() {
// Get Converted frame
const std::size_t linear_size = frame->width * frame->height * 4;
- using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
- AVMallocPtr converted_frame_buffer{static_cast<u8*>(av_malloc(linear_size)), av_free};
+ // Only allocate frame_buffer once per stream, as the size is not expected to change
+ if (!converted_frame_buffer) {
+ converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(linear_size)), av_free};
+ }
const int converted_stride{frame->width * 4};
u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
@@ -104,12 +101,12 @@ void Vic::Execute() {
const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
const auto size = Tegra::Texture::CalculateSize(true, 4, frame->width, frame->height, 1,
block_height, 0);
- std::vector<u8> swizzled_data(size);
+ luma_buffer.resize(size);
Tegra::Texture::SwizzleSubrect(frame->width, frame->height, frame->width * 4,
- frame->width, 4, swizzled_data.data(),
+ frame->width, 4, luma_buffer.data(),
converted_frame_buffer.get(), block_height, 0, 0);
- gpu.MemoryManager().WriteBlock(output_surface_luma_address, swizzled_data.data(), size);
+ gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
} else {
// send pitch linear frame
gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
@@ -132,15 +129,15 @@ void Vic::Execute() {
const auto stride = frame->linesize[0];
const auto half_stride = frame->linesize[1];
- std::vector<u8> luma_buffer(aligned_width * surface_height);
- std::vector<u8> chroma_buffer(aligned_width * half_height);
+ luma_buffer.resize(aligned_width * surface_height);
+ chroma_buffer.resize(aligned_width * half_height);
// Populate luma buffer
for (std::size_t y = 0; y < surface_height - 1; ++y) {
- std::size_t src = y * stride;
- std::size_t dst = y * aligned_width;
+ const std::size_t src = y * stride;
+ const std::size_t dst = y * aligned_width;
- std::size_t size = surface_width;
+ const std::size_t size = surface_width;
for (std::size_t offset = 0; offset < size; ++offset) {
luma_buffer[dst + offset] = luma_ptr[src + offset];
@@ -151,8 +148,8 @@ void Vic::Execute() {
// Populate chroma buffer from both channels with interleaving.
for (std::size_t y = 0; y < half_height; ++y) {
- std::size_t src = y * half_stride;
- std::size_t dst = y * aligned_width;
+ const std::size_t src = y * half_stride;
+ const std::size_t dst = y * aligned_width;
for (std::size_t x = 0; x < half_width; ++x) {
chroma_buffer[dst + x * 2] = chroma_b_ptr[src + x];
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/command_classes/vic.h
index 8c4e284a1..f5a2ed100 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/command_classes/vic.h
@@ -15,43 +15,6 @@ namespace Tegra {
class GPU;
class Nvdec;
-struct PlaneOffsets {
- u32 luma_offset{};
- u32 chroma_u_offset{};
- u32 chroma_v_offset{};
-};
-
-struct VicRegisters {
- INSERT_PADDING_WORDS(64);
- u32 nop{};
- INSERT_PADDING_WORDS(15);
- u32 pm_trigger{};
- INSERT_PADDING_WORDS(47);
- u32 set_application_id{};
- u32 set_watchdog_timer{};
- INSERT_PADDING_WORDS(17);
- u32 context_save_area{};
- u32 context_switch{};
- INSERT_PADDING_WORDS(43);
- u32 execute{};
- INSERT_PADDING_WORDS(63);
- std::array<std::array<PlaneOffsets, 8>, 8> surfacex_slots{};
- u32 picture_index{};
- u32 control_params{};
- u32 config_struct_offset{};
- u32 filter_struct_offset{};
- u32 palette_offset{};
- u32 hist_offset{};
- u32 context_id{};
- u32 fce_ucode_size{};
- PlaneOffsets output_surface{};
- u32 fce_ucode_offset{};
- INSERT_PADDING_WORDS(4);
- std::array<u32, 8> slot_context_id{};
- INSERT_PADDING_WORDS(16);
-};
-static_assert(sizeof(VicRegisters) == 0x7A0, "VicRegisters is an invalid size");
-
class Vic {
public:
enum class Method : u32 {
@@ -67,14 +30,11 @@ public:
~Vic();
/// Write to the device state.
- void ProcessMethod(Method method, const std::vector<u32>& arguments);
+ void ProcessMethod(Method method, u32 argument);
private:
void Execute();
- void VicStateWrite(u32 offset, u32 arguments);
- VicRegisters vic_state{};
-
enum class VideoPixelFormat : u64_le {
RGBA8 = 0x1f,
BGRA8 = 0x20,
@@ -88,8 +48,6 @@ private:
BitField<9, 2, u64_le> chroma_loc_vert;
BitField<11, 4, u64_le> block_linear_kind;
BitField<15, 4, u64_le> block_linear_height_log2;
- BitField<19, 3, u64_le> reserved0;
- BitField<22, 10, u64_le> reserved1;
BitField<32, 14, u64_le> surface_width_minus1;
BitField<46, 14, u64_le> surface_height_minus1;
};
@@ -97,6 +55,13 @@ private:
GPU& gpu;
std::shared_ptr<Tegra::Nvdec> nvdec_processor;
+ /// Avoid reallocation of the following buffers every frame, as their
+ /// size does not change during a stream
+ using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
+ AVMallocPtr converted_frame_buffer;
+ std::vector<u8> luma_buffer;
+ std::vector<u8> chroma_buffer;
+
GPUVAddr config_struct_address{};
GPUVAddr output_surface_luma_address{};
GPUVAddr output_surface_chroma_u_address{};
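
The VIC change keeps the av_malloc'd RGBA staging buffer and the swizzle/luma/chroma scratch vectors alive across frames instead of reallocating them in every Execute(). A hedged sketch of that allocate-once pattern follows; class and member names are illustrative, while av_malloc/av_free are the FFmpeg allocator pair the real code wraps in AVMallocPtr:

extern "C" {
#include <libavutil/mem.h>
}
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

class FrameScratch {
public:
    using AVMallocPtr = std::unique_ptr<std::uint8_t, decltype(&av_free)>;

    FrameScratch() : rgba_buffer{nullptr, av_free} {}

    // Allocates only on the first frame (or if the frame ever grows); a
    // fixed-resolution stream therefore pays for av_malloc exactly once.
    std::uint8_t* AcquireRgba(std::size_t linear_size) {
        if (!rgba_buffer || linear_size > rgba_capacity) {
            rgba_buffer = AVMallocPtr{static_cast<std::uint8_t*>(av_malloc(linear_size)), av_free};
            rgba_capacity = linear_size;
        }
        return rgba_buffer.get();
    }

    // resize() stops reallocating once capacity has been reached, so reusing a
    // member vector avoids a fresh heap allocation per frame.
    std::vector<std::uint8_t>& Luma(std::size_t size) {
        luma_buffer.resize(size);
        return luma_buffer;
    }

private:
    AVMallocPtr rgba_buffer;
    std::size_t rgba_capacity = 0;
    std::vector<std::uint8_t> luma_buffer;
};
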
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 2a9bd4121..51c63af4a 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -30,8 +30,7 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
GPU::GPU(Core::System& system_, bool is_async_, bool use_nvdec_)
: system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(system)},
- dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)},
- cdma_pusher{std::make_unique<Tegra::CDmaPusher>(*this)}, use_nvdec{use_nvdec_},
+ dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)}, use_nvdec{use_nvdec_},
maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
fermi_2d{std::make_unique<Engines::Fermi2D>()},
kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
@@ -494,8 +493,7 @@ void GPU::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) {
// TODO(ameerj): RE proper async nvdec operation
// gpu_thread.SubmitCommandBuffer(std::move(entries));
- cdma_pusher->Push(std::move(entries));
- cdma_pusher->DispatchCalls();
+ cdma_pusher->ProcessEntries(std::move(entries));
}
void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 50319f1d5..eb0e43c0c 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -48,8 +48,7 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
dma_pusher.DispatchCalls();
} else if (auto* command_list = std::get_if<SubmitChCommandEntries>(&next.data)) {
// NVDEC
- cdma_pusher.Push(std::move(command_list->entries));
- cdma_pusher.DispatchCalls();
+ cdma_pusher.ProcessEntries(std::move(command_list->entries));
} else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
} else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 529570ff0..5cf7cd151 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -335,6 +335,10 @@ void ShaderCacheOpenGL::LoadDiskCache(u64 title_id, const std::atomic_bool& stop
const VideoCore::DiskResourceLoadCallback& callback) {
disk_cache.BindTitleID(title_id);
const std::optional transferable = disk_cache.LoadTransferable();
+
+ LOG_INFO(Render_OpenGL, "Total Shader Count: {}",
+ transferable.has_value() ? transferable->size() : 0);
+
if (!transferable) {
return;
}
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 5be6dabd9..362278f01 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -12,14 +12,15 @@
#include "common/cityhash.h"
#include "common/common_types.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_state_tracker.h"
namespace Vulkan {
namespace {
-constexpr std::size_t POINT = 0;
-constexpr std::size_t LINE = 1;
-constexpr std::size_t POLYGON = 2;
+constexpr size_t POINT = 0;
+constexpr size_t LINE = 1;
+constexpr size_t POLYGON = 2;
constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
POINT, // Points
LINE, // Lines
@@ -40,10 +41,14 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
} // Anonymous namespace
-void FixedPipelineState::Fill(const Maxwell& regs, bool has_extended_dynamic_state) {
- const std::array enabled_lut = {regs.polygon_offset_point_enable,
- regs.polygon_offset_line_enable,
- regs.polygon_offset_fill_enable};
+void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
+ bool has_extended_dynamic_state) {
+ const Maxwell& regs = maxwell3d.regs;
+ const std::array enabled_lut{
+ regs.polygon_offset_point_enable,
+ regs.polygon_offset_line_enable,
+ regs.polygon_offset_fill_enable,
+ };
const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
raw1 = 0;
@@ -64,45 +69,53 @@ void FixedPipelineState::Fill(const Maxwell& regs, bool has_extended_dynamic_sta
raw2 = 0;
const auto test_func =
- regs.alpha_test_enabled == 1 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
+ regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
alpha_test_func.Assign(PackComparisonOp(test_func));
early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
point_size = Common::BitCast<u32>(regs.point_size);
- for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
- binding_divisors[index] =
- regs.instanced_arrays.IsInstancingEnabled(index) ? regs.vertex_array[index].divisor : 0;
+ if (maxwell3d.dirty.flags[Dirty::InstanceDivisors]) {
+ maxwell3d.dirty.flags[Dirty::InstanceDivisors] = false;
+ for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
+ const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
+ binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
+ }
}
-
- for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
- const auto& input = regs.vertex_attrib_format[index];
- auto& attribute = attributes[index];
- attribute.raw = 0;
- attribute.enabled.Assign(input.IsConstant() ? 0 : 1);
- attribute.buffer.Assign(input.buffer);
- attribute.offset.Assign(input.offset);
- attribute.type.Assign(static_cast<u32>(input.type.Value()));
- attribute.size.Assign(static_cast<u32>(input.size.Value()));
- attribute.binding_index_enabled.Assign(regs.vertex_array[index].IsEnabled() ? 1 : 0);
+ if (maxwell3d.dirty.flags[Dirty::VertexAttributes]) {
+ maxwell3d.dirty.flags[Dirty::VertexAttributes] = false;
+ for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
+ const auto& input = regs.vertex_attrib_format[index];
+ auto& attribute = attributes[index];
+ attribute.raw = 0;
+ attribute.enabled.Assign(input.IsConstant() ? 0 : 1);
+ attribute.buffer.Assign(input.buffer);
+ attribute.offset.Assign(input.offset);
+ attribute.type.Assign(static_cast<u32>(input.type.Value()));
+ attribute.size.Assign(static_cast<u32>(input.size.Value()));
+ }
}
-
- for (std::size_t index = 0; index < std::size(attachments); ++index) {
- attachments[index].Fill(regs, index);
+ if (maxwell3d.dirty.flags[Dirty::Blending]) {
+ maxwell3d.dirty.flags[Dirty::Blending] = false;
+ for (size_t index = 0; index < attachments.size(); ++index) {
+ attachments[index].Refresh(regs, index);
+ }
+ }
+ if (maxwell3d.dirty.flags[Dirty::ViewportSwizzles]) {
+ maxwell3d.dirty.flags[Dirty::ViewportSwizzles] = false;
+ const auto& transform = regs.viewport_transform;
+ std::ranges::transform(transform, viewport_swizzles.begin(), [](const auto& viewport) {
+ return static_cast<u16>(viewport.swizzle.raw);
+ });
}
-
- const auto& transform = regs.viewport_transform;
- std::transform(transform.begin(), transform.end(), viewport_swizzles.begin(),
- [](const auto& viewport) { return static_cast<u16>(viewport.swizzle.raw); });
-
if (!has_extended_dynamic_state) {
no_extended_dynamic_state.Assign(1);
- dynamic_state.Fill(regs);
+ dynamic_state.Refresh(regs);
}
}
-void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size_t index) {
+void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t index) {
const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : index];
raw = 0;
@@ -141,7 +154,7 @@ void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size
enable.Assign(1);
}
-void FixedPipelineState::DynamicState::Fill(const Maxwell& regs) {
+void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) {
u32 packed_front_face = PackFrontFace(regs.front_face);
if (regs.screen_y_control.triangle_rast_flip != 0) {
// Flip front face
@@ -178,9 +191,9 @@ void FixedPipelineState::DynamicState::Fill(const Maxwell& regs) {
});
}
-std::size_t FixedPipelineState::Hash() const noexcept {
+size_t FixedPipelineState::Hash() const noexcept {
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
- return static_cast<std::size_t>(hash);
+ return static_cast<size_t>(hash);
}
bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
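
Refresh() now consumes Maxwell3D dirty flags, so the expensive register repacks (instance divisors, vertex attributes, blend attachments, viewport swizzles) only run when the guest actually wrote those registers. A minimal sketch of the check-and-clear shape, using an illustrative Engine/PipelineState pair rather than the real Maxwell3D types:

#include <bitset>
#include <cstddef>

namespace sketch {
enum Flag : std::size_t { InstanceDivisors, VertexAttributes, Blending, ViewportSwizzles, Last };

struct Engine {
    std::bitset<Flag::Last> dirty; // set by the state tracker on register writes
    // ... guest register state would live here
};

struct PipelineState {
    unsigned divisor_generation = 0;

    void Refresh(Engine& engine) {
        if (engine.dirty[Flag::InstanceDivisors]) {
            engine.dirty[Flag::InstanceDivisors] = false; // clear so the next draw can skip this
            ++divisor_generation;                         // stand-in for repacking binding_divisors
        }
        // VertexAttributes, Blending and ViewportSwizzles follow the same check-and-clear shape
    }
};
} // namespace sketch
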
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 465a55fdb..a0eb83a68 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -58,7 +58,7 @@ struct FixedPipelineState {
BitField<30, 1, u32> enable;
};
- void Fill(const Maxwell& regs, std::size_t index);
+ void Refresh(const Maxwell& regs, size_t index);
constexpr std::array<bool, 4> Mask() const noexcept {
return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
@@ -96,8 +96,6 @@ struct FixedPipelineState {
BitField<6, 14, u32> offset;
BitField<20, 3, u32> type;
BitField<23, 6, u32> size;
- // Not really an element of a vertex attribute, but it can be packed here
- BitField<29, 1, u32> binding_index_enabled;
constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
@@ -108,7 +106,7 @@ struct FixedPipelineState {
}
};
- template <std::size_t Position>
+ template <size_t Position>
union StencilFace {
BitField<Position + 0, 3, u32> action_stencil_fail;
BitField<Position + 3, 3, u32> action_depth_fail;
@@ -152,7 +150,7 @@ struct FixedPipelineState {
// Vertex stride is a 12 bits value, we have 4 bits to spare per element
std::array<u16, Maxwell::NumVertexArrays> vertex_strides;
- void Fill(const Maxwell& regs);
+ void Refresh(const Maxwell& regs);
Maxwell::ComparisonOp DepthTestFunc() const noexcept {
return UnpackComparisonOp(depth_test_func);
@@ -199,9 +197,9 @@ struct FixedPipelineState {
std::array<u16, Maxwell::NumViewports> viewport_swizzles;
DynamicState dynamic_state;
- void Fill(const Maxwell& regs, bool has_extended_dynamic_state);
+ void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state);
- std::size_t Hash() const noexcept;
+ size_t Hash() const noexcept;
bool operator==(const FixedPipelineState& rhs) const noexcept;
@@ -209,8 +207,8 @@ struct FixedPipelineState {
return !operator==(rhs);
}
- std::size_t Size() const noexcept {
- const std::size_t total_size = sizeof *this;
+ size_t Size() const noexcept {
+ const size_t total_size = sizeof *this;
return total_size - (no_extended_dynamic_state != 0 ? 0 : sizeof(DynamicState));
}
};
@@ -224,7 +222,7 @@ namespace std {
template <>
struct hash<Vulkan::FixedPipelineState> {
- std::size_t operator()(const Vulkan::FixedPipelineState& k) const noexcept {
+ size_t operator()(const Vulkan::FixedPipelineState& k) const noexcept {
return k.Hash();
}
};
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 848eedd66..668633e7b 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -201,10 +201,6 @@ void BufferCacheRuntime::BindTransformFeedbackBuffer(u32 index, VkBuffer buffer,
});
}
-void BufferCacheRuntime::BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
- update_descriptor_queue.AddBuffer(buffer, offset, size);
-}
-
void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle) {
if (num_indices <= current_num_indices) {
return;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 041e6515c..982e92191 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -8,6 +8,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -16,7 +17,6 @@ namespace Vulkan {
class Device;
class VKDescriptorPool;
class VKScheduler;
-class VKUpdateDescriptorQueue;
class BufferCacheRuntime;
@@ -86,7 +86,9 @@ public:
}
private:
- void BindBuffer(VkBuffer buffer, u32 offset, u32 size);
+ void BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
+ update_descriptor_queue.AddBuffer(buffer, offset, size);
+ }
void ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle);
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index d50dca604..fc6dd83eb 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -221,9 +221,6 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const SPIRVProgram& program,
std::vector<VkVertexInputBindingDescription> vertex_bindings;
std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
- if (state.attributes[index].binding_index_enabled == 0) {
- continue;
- }
const bool instanced = state.binding_divisors[index] != 0;
const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
vertex_bindings.push_back({
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 684d4e3a6..dfd38f575 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -267,8 +267,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
query_cache.UpdateCounters();
- GraphicsPipelineCacheKey key;
- key.fixed_state.Fill(maxwell3d.regs, device.IsExtExtendedDynamicStateSupported());
+ graphics_key.fixed_state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported());
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
@@ -276,14 +275,15 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
texture_cache.UpdateRenderTargets(false);
const auto shaders = pipeline_cache.GetShaders();
- key.shaders = GetShaderAddresses(shaders);
+ graphics_key.shaders = GetShaderAddresses(shaders);
+
SetupShaderDescriptors(shaders, is_indexed);
const Framebuffer* const framebuffer = texture_cache.GetFramebuffer();
- key.renderpass = framebuffer->RenderPass();
+ graphics_key.renderpass = framebuffer->RenderPass();
- auto* const pipeline =
- pipeline_cache.GetGraphicsPipeline(key, framebuffer->NumColorBuffers(), async_shaders);
+ VKGraphicsPipeline* const pipeline = pipeline_cache.GetGraphicsPipeline(
+ graphics_key, framebuffer->NumColorBuffers(), async_shaders);
if (pipeline == nullptr || pipeline->GetHandle() == VK_NULL_HANDLE) {
// Async graphics pipeline was not ready.
return;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 7fc6741da..acea1ba2d 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -20,6 +20,7 @@
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_fence_manager.h"
+#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -173,6 +174,8 @@ private:
VKUpdateDescriptorQueue update_descriptor_queue;
BlitImageHelper blit_image;
+ GraphicsPipelineCacheKey graphics_key;
+
TextureCacheRuntime texture_cache_runtime;
TextureCache texture_cache;
BufferCacheRuntime buffer_cache_runtime;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index e81fad007..956f86845 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -18,9 +18,7 @@
#define NUM(field_name) (sizeof(Maxwell3D::Regs::field_name) / (sizeof(u32)))
namespace Vulkan {
-
namespace {
-
using namespace Dirty;
using namespace VideoCommon::Dirty;
using Tegra::Engines::Maxwell3D;
@@ -128,6 +126,34 @@ void SetupDirtyStencilTestEnable(Tables& tables) {
tables[0][OFF(stencil_enable)] = StencilTestEnable;
}
+void SetupDirtyBlending(Tables& tables) {
+ tables[0][OFF(color_mask_common)] = Blending;
+ tables[0][OFF(independent_blend_enable)] = Blending;
+ FillBlock(tables[0], OFF(color_mask), NUM(color_mask), Blending);
+ FillBlock(tables[0], OFF(blend), NUM(blend), Blending);
+ FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending);
+}
+
+void SetupDirtyInstanceDivisors(Tables& tables) {
+ static constexpr size_t divisor_offset = 3;
+ for (size_t index = 0; index < Regs::NumVertexArrays; ++index) {
+ tables[0][OFF(instanced_arrays) + index] = InstanceDivisors;
+ tables[0][OFF(vertex_array) + index * NUM(vertex_array[0]) + divisor_offset] =
+ InstanceDivisors;
+ }
+}
+
+void SetupDirtyVertexAttributes(Tables& tables) {
+ FillBlock(tables[0], OFF(vertex_attrib_format), NUM(vertex_attrib_format), VertexAttributes);
+}
+
+void SetupDirtyViewportSwizzles(Tables& tables) {
+ static constexpr size_t swizzle_offset = 6;
+ for (size_t index = 0; index < Regs::NumViewports; ++index) {
+ tables[0][OFF(viewport_transform) + index * NUM(viewport_transform[0]) + swizzle_offset] =
+ ViewportSwizzles;
+ }
+}
} // Anonymous namespace
StateTracker::StateTracker(Tegra::GPU& gpu)
@@ -148,6 +174,10 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
SetupDirtyFrontFace(tables);
SetupDirtyStencilOp(tables);
SetupDirtyStencilTestEnable(tables);
+ SetupDirtyBlending(tables);
+ SetupDirtyInstanceDivisors(tables);
+ SetupDirtyVertexAttributes(tables);
+ SetupDirtyViewportSwizzles(tables);
}
} // namespace Vulkan
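
The new Setup* functions populate per-register dirty tables so that a write anywhere in those register ranges flips the matching flag later consumed by FixedPipelineState::Refresh(). A hedged sketch of how such a table works end to end; the register count, FillBlock helper and write hook are illustrative, not the actual Maxwell3D dirty-state plumbing:

#include <array>
#include <bitset>
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr std::size_t NUM_REGISTERS = 0x1000;
enum Flag : std::uint8_t { NoFlag = 0, Blending, InstanceDivisors, VertexAttributes, ViewportSwizzles, Last };

struct Tracker {
    std::array<std::uint8_t, NUM_REGISTERS> table{}; // register offset -> dirty flag index
    std::bitset<Flag::Last> flags;

    void FillBlock(std::size_t begin, std::size_t count, std::uint8_t flag) {
        for (std::size_t i = begin; i < begin + count; ++i) {
            table[i] = flag;
        }
    }

    void OnRegisterWrite(std::size_t offset) {
        flags[table[offset]] = true; // NoFlag (= 0) acts as a harmless catch-all bit
    }
};
} // namespace sketch
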
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index c335d2bdf..84e918a71 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -35,6 +35,11 @@ enum : u8 {
StencilOp,
StencilTestEnable,
+ Blending,
+ InstanceDivisors,
+ VertexAttributes,
+ ViewportSwizzles,
+
Last
};
static_assert(Last <= std::numeric_limits<u8>::max());
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index f99273c6a..dc45fdcb1 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -20,20 +20,20 @@ VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKSchedu
VKUpdateDescriptorQueue::~VKUpdateDescriptorQueue() = default;
void VKUpdateDescriptorQueue::TickFrame() {
- payload.clear();
+ payload_cursor = payload.data();
}
void VKUpdateDescriptorQueue::Acquire() {
// Minimum number of entries required.
// This is the maximum number of entries a single draw call migth use.
- static constexpr std::size_t MIN_ENTRIES = 0x400;
+ static constexpr size_t MIN_ENTRIES = 0x400;
- if (payload.size() + MIN_ENTRIES >= payload.max_size()) {
+ if (std::distance(payload.data(), payload_cursor) + MIN_ENTRIES >= payload.max_size()) {
LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread");
scheduler.WaitWorker();
- payload.clear();
+ payload_cursor = payload.data();
}
- upload_start = &*payload.end();
+ upload_start = payload_cursor;
}
void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index e214f7195..d35e77c44 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -4,8 +4,7 @@
#pragma once
-#include <variant>
-#include <boost/container/static_vector.hpp>
+#include <array>
#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -16,13 +15,15 @@ class Device;
class VKScheduler;
struct DescriptorUpdateEntry {
- DescriptorUpdateEntry(VkDescriptorImageInfo image_) : image{image_} {}
+ struct Empty {};
+ DescriptorUpdateEntry() = default;
+ DescriptorUpdateEntry(VkDescriptorImageInfo image_) : image{image_} {}
DescriptorUpdateEntry(VkDescriptorBufferInfo buffer_) : buffer{buffer_} {}
-
DescriptorUpdateEntry(VkBufferView texel_buffer_) : texel_buffer{texel_buffer_} {}
union {
+ Empty empty{};
VkDescriptorImageInfo image;
VkDescriptorBufferInfo buffer;
VkBufferView texel_buffer;
@@ -41,39 +42,40 @@ public:
void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
void AddSampledImage(VkImageView image_view, VkSampler sampler) {
- payload.emplace_back(VkDescriptorImageInfo{
+ *(payload_cursor++) = VkDescriptorImageInfo{
.sampler = sampler,
.imageView = image_view,
.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
- });
+ };
}
void AddImage(VkImageView image_view) {
- payload.emplace_back(VkDescriptorImageInfo{
+ *(payload_cursor++) = VkDescriptorImageInfo{
.sampler = VK_NULL_HANDLE,
.imageView = image_view,
.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
- });
+ };
}
- void AddBuffer(VkBuffer buffer, u64 offset, size_t size) {
- payload.emplace_back(VkDescriptorBufferInfo{
+ void AddBuffer(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size) {
+ *(payload_cursor++) = VkDescriptorBufferInfo{
.buffer = buffer,
.offset = offset,
.range = size,
- });
+ };
}
void AddTexelBuffer(VkBufferView texel_buffer) {
- payload.emplace_back(texel_buffer);
+ *(payload_cursor++) = texel_buffer;
}
private:
const Device& device;
VKScheduler& scheduler;
+ DescriptorUpdateEntry* payload_cursor = nullptr;
const DescriptorUpdateEntry* upload_start = nullptr;
- boost::container::static_vector<DescriptorUpdateEntry, 0x10000> payload;
+ std::array<DescriptorUpdateEntry, 0x10000> payload;
};
} // namespace Vulkan
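
The descriptor queue now writes through a raw cursor into a fixed std::array instead of growing a boost::container::static_vector. A hedged sketch of that cursor scheme; the entry type and sizes are simplified, and the real queue stores DescriptorUpdateEntry unions and waits on the scheduler before rewinding on overflow:

#include <array>
#include <cstddef>
#include <cstdint>

class UpdateQueueSketch {
public:
    // Rewind instead of clear(): the storage is reused, nothing is destroyed.
    void TickFrame() { cursor = payload.data(); }

    void Acquire() {
        constexpr std::size_t MIN_ENTRIES = 0x400; // worst case one draw can consume
        if (static_cast<std::size_t>(cursor - payload.data()) + MIN_ENTRIES >= payload.size()) {
            cursor = payload.data(); // the real queue waits for the worker thread first
        }
        upload_start = cursor; // descriptors recorded for this draw begin here
    }

    void Add(std::uint64_t entry) { *(cursor++) = entry; }

    const std::uint64_t* UploadStart() const { return upload_start; }

private:
    std::array<std::uint64_t, 0x10000> payload{};
    std::uint64_t* cursor = payload.data();
    const std::uint64_t* upload_start = nullptr;
};
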