Diffstat (limited to 'src/video_core')
 src/video_core/CMakeLists.txt | 2
 src/video_core/buffer_cache/buffer_block.h | 42
 src/video_core/buffer_cache/buffer_cache.h | 145
 src/video_core/buffer_cache/map_interval.h | 12
 src/video_core/engines/maxwell_3d.h | 81
 src/video_core/engines/shader_bytecode.h | 71
 src/video_core/engines/shader_header.h | 55
 src/video_core/gpu.h | 6
 src/video_core/gpu_asynch.cpp | 11
 src/video_core/gpu_asynch.h | 6
 src/video_core/gpu_synch.cpp | 6
 src/video_core/gpu_synch.h | 6
 src/video_core/gpu_thread.cpp | 6
 src/video_core/gpu_thread.h | 18
 src/video_core/memory_manager.cpp | 93
 src/video_core/memory_manager.h | 5
 src/video_core/query_cache.h | 37
 src/video_core/rasterizer_cache.h | 44
 src/video_core/rasterizer_interface.h | 6
 src/video_core/renderer_opengl/gl_buffer_cache.cpp | 8
 src/video_core/renderer_opengl/gl_buffer_cache.h | 4
 src/video_core/renderer_opengl/gl_device.cpp | 26
 src/video_core/renderer_opengl/gl_device.h | 5
 src/video_core/renderer_opengl/gl_rasterizer.cpp | 42
 src/video_core/renderer_opengl/gl_rasterizer.h | 9
 src/video_core/renderer_opengl/gl_shader_cache.cpp | 50
 src/video_core/renderer_opengl/gl_shader_cache.h | 8
 src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 48
 src/video_core/renderer_opengl/gl_state_tracker.cpp | 7
 src/video_core/renderer_opengl/gl_state_tracker.h | 1
 src/video_core/renderer_opengl/gl_texture_cache.cpp | 215
 src/video_core/renderer_opengl/gl_texture_cache.h | 6
 src/video_core/renderer_opengl/renderer_opengl.cpp | 4
 src/video_core/renderer_vulkan/declarations.h | 58
 src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 533
 src/video_core/renderer_vulkan/maxwell_to_vk.h | 37
 src/video_core/renderer_vulkan/renderer_vulkan.cpp | 291
 src/video_core/renderer_vulkan/renderer_vulkan.h | 19
 src/video_core/renderer_vulkan/vk_blit_screen.cpp | 625
 src/video_core/renderer_vulkan/vk_blit_screen.h | 34
 src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 157
 src/video_core/renderer_vulkan/vk_buffer_cache.h | 16
 src/video_core/renderer_vulkan/vk_compute_pass.cpp | 241
 src/video_core/renderer_vulkan/vk_compute_pass.h | 29
 src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 129
 src/video_core/renderer_vulkan/vk_compute_pipeline.h | 30
 src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 92
 src/video_core/renderer_vulkan/vk_descriptor_pool.h | 19
 src/video_core/renderer_vulkan/vk_device.cpp | 575
 src/video_core/renderer_vulkan/vk_device.h | 83
 src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 378
 src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 38
 src/video_core/renderer_vulkan/vk_image.cpp | 61
 src/video_core/renderer_vulkan/vk_image.h | 40
 src/video_core/renderer_vulkan/vk_memory_manager.cpp | 104
 src/video_core/renderer_vulkan/vk_memory_manager.h | 34
 src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 131
 src/video_core/renderer_vulkan/vk_pipeline_cache.h | 13
 src/video_core/renderer_vulkan/vk_query_cache.cpp | 56
 src/video_core/renderer_vulkan/vk_query_cache.h | 14
 src/video_core/renderer_vulkan/vk_rasterizer.cpp | 309
 src/video_core/renderer_vulkan/vk_rasterizer.h | 21
 src/video_core/renderer_vulkan/vk_renderpass_cache.cpp | 120
 src/video_core/renderer_vulkan/vk_renderpass_cache.h | 8
 src/video_core/renderer_vulkan/vk_resource_manager.cpp | 87
 src/video_core/renderer_vulkan/vk_resource_manager.h | 10
 src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 76
 src/video_core/renderer_vulkan/vk_sampler_cache.h | 8
 src/video_core/renderer_vulkan/vk_scheduler.cpp | 87
 src/video_core/renderer_vulkan/vk_scheduler.h | 29
 src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 19
 src/video_core/renderer_vulkan/vk_shader_util.cpp | 20
 src/video_core/renderer_vulkan/vk_shader_util.h | 4
 src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp | 25
 src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 4
 src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 64
 src/video_core/renderer_vulkan/vk_stream_buffer.h | 18
 src/video_core/renderer_vulkan/vk_swapchain.cpp | 155
 src/video_core/renderer_vulkan/vk_swapchain.h | 32
 src/video_core/renderer_vulkan/vk_texture_cache.cpp | 380
 src/video_core/renderer_vulkan/vk_texture_cache.h | 64
 src/video_core/renderer_vulkan/vk_update_descriptor.cpp | 18
 src/video_core/renderer_vulkan/vk_update_descriptor.h | 42
 src/video_core/shader/decode/arithmetic.cpp | 3
 src/video_core/shader/decode/conversion.cpp | 113
 src/video_core/shader/decode/image.cpp | 360
 src/video_core/shader/decode/other.cpp | 48
 src/video_core/shader/decode/texture.cpp | 14
 src/video_core/shader/decode/video.cpp | 58
 src/video_core/shader/shader_ir.cpp | 3
 src/video_core/shader/shader_ir.h | 7
 src/video_core/surface.h | 97
 src/video_core/texture_cache/surface_base.cpp | 81
 src/video_core/texture_cache/surface_base.h | 49
 src/video_core/texture_cache/surface_params.cpp | 34
 src/video_core/texture_cache/surface_params.h | 36
 src/video_core/texture_cache/texture_cache.h | 181
 src/video_core/textures/astc.cpp | 241
 src/video_core/textures/texture.cpp | 80
 src/video_core/textures/texture.h | 46
100 files changed, 4741 insertions(+), 3232 deletions(-)
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index effe76a63..258d58eba 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -148,6 +148,7 @@ add_library(video_core STATIC
textures/convert.h
textures/decoders.cpp
textures/decoders.h
+ textures/texture.cpp
textures/texture.h
video_core.cpp
video_core.h
@@ -155,7 +156,6 @@ add_library(video_core STATIC
if (ENABLE_VULKAN)
target_sources(video_core PRIVATE
- renderer_vulkan/declarations.h
renderer_vulkan/fixed_pipeline_state.cpp
renderer_vulkan/fixed_pipeline_state.h
renderer_vulkan/maxwell_to_vk.cpp
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
index 4b9193182..e35ee0b67 100644
--- a/src/video_core/buffer_cache/buffer_block.h
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -15,37 +15,29 @@ namespace VideoCommon {
class BufferBlock {
public:
- bool Overlaps(const CacheAddr start, const CacheAddr end) const {
- return (cache_addr < end) && (cache_addr_end > start);
+ bool Overlaps(const VAddr start, const VAddr end) const {
+ return (cpu_addr < end) && (cpu_addr_end > start);
}
- bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
- return cache_addr <= other_start && other_end <= cache_addr_end;
+ bool IsInside(const VAddr other_start, const VAddr other_end) const {
+ return cpu_addr <= other_start && other_end <= cpu_addr_end;
}
- u8* GetWritableHostPtr() const {
- return FromCacheAddr(cache_addr);
+ std::size_t GetOffset(const VAddr in_addr) {
+ return static_cast<std::size_t>(in_addr - cpu_addr);
}
- u8* GetWritableHostPtr(std::size_t offset) const {
- return FromCacheAddr(cache_addr + offset);
+ VAddr GetCpuAddr() const {
+ return cpu_addr;
}
- std::size_t GetOffset(const CacheAddr in_addr) {
- return static_cast<std::size_t>(in_addr - cache_addr);
+ VAddr GetCpuAddrEnd() const {
+ return cpu_addr_end;
}
- CacheAddr GetCacheAddr() const {
- return cache_addr;
- }
-
- CacheAddr GetCacheAddrEnd() const {
- return cache_addr_end;
- }
-
- void SetCacheAddr(const CacheAddr new_addr) {
- cache_addr = new_addr;
- cache_addr_end = new_addr + size;
+ void SetCpuAddr(const VAddr new_addr) {
+ cpu_addr = new_addr;
+ cpu_addr_end = new_addr + size;
}
std::size_t GetSize() const {
@@ -61,14 +53,14 @@ public:
}
protected:
- explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} {
- SetCacheAddr(cache_addr);
+ explicit BufferBlock(VAddr cpu_addr, const std::size_t size) : size{size} {
+ SetCpuAddr(cpu_addr);
}
~BufferBlock() = default;
private:
- CacheAddr cache_addr{};
- CacheAddr cache_addr_end{};
+ VAddr cpu_addr{};
+ VAddr cpu_addr_end{};
std::size_t size{};
u64 epoch{};
};
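
The buffer_block.h change above swaps host-side CacheAddr bookkeeping for guest VAddr bookkeeping, but the interval logic itself is untouched: Overlaps() is the standard half-open interval intersection test. A minimal standalone sketch of that test (illustrative only, not part of the patch):

#include <cstdint>

using VAddr = std::uint64_t; // mirrors the alias in common/common_types.h

// Two ranges [a_start, a_end) and [b_start, b_end) intersect exactly when
// each one begins before the other ends.
constexpr bool Overlaps(VAddr a_start, VAddr a_end, VAddr b_start, VAddr b_end) {
    return a_start < b_end && b_start < a_end;
}

static_assert(Overlaps(0x1000, 0x2000, 0x1FFF, 0x3000));  // one shared byte
static_assert(!Overlaps(0x1000, 0x2000, 0x2000, 0x3000)); // touching ends, disjoint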
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 186aca61d..b57c0d4d4 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -19,6 +19,7 @@
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/core.h"
+#include "core/memory.h"
#include "video_core/buffer_cache/buffer_block.h"
#include "video_core/buffer_cache/map_interval.h"
#include "video_core/memory_manager.h"
@@ -37,28 +38,45 @@ public:
bool is_written = false, bool use_fast_cbuf = false) {
std::lock_guard lock{mutex};
- auto& memory_manager = system.GPU().MemoryManager();
- const auto host_ptr = memory_manager.GetPointer(gpu_addr);
- if (!host_ptr) {
+ const std::optional<VAddr> cpu_addr_opt =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+
+ if (!cpu_addr_opt) {
return {GetEmptyBuffer(size), 0};
}
- const auto cache_addr = ToCacheAddr(host_ptr);
+
+ VAddr cpu_addr = *cpu_addr_opt;
// Cache management is a big overhead, so only cache entries with a given size.
// TODO: Figure out which size is the best for given games.
constexpr std::size_t max_stream_size = 0x800;
if (use_fast_cbuf || size < max_stream_size) {
- if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) {
+ if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
+ auto& memory_manager = system.GPU().MemoryManager();
if (use_fast_cbuf) {
- return ConstBufferUpload(host_ptr, size);
+ if (memory_manager.IsGranularRange(gpu_addr, size)) {
+ const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+ return ConstBufferUpload(host_ptr, size);
+ } else {
+ staging_buffer.resize(size);
+ memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+ return ConstBufferUpload(staging_buffer.data(), size);
+ }
} else {
- return StreamBufferUpload(host_ptr, size, alignment);
+ if (memory_manager.IsGranularRange(gpu_addr, size)) {
+ const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+ return StreamBufferUpload(host_ptr, size, alignment);
+ } else {
+ staging_buffer.resize(size);
+ memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+ return StreamBufferUpload(staging_buffer.data(), size, alignment);
+ }
}
}
}
- auto block = GetBlock(cache_addr, size);
- auto map = MapAddress(block, gpu_addr, cache_addr, size);
+ auto block = GetBlock(cpu_addr, size);
+ auto map = MapAddress(block, gpu_addr, cpu_addr, size);
if (is_written) {
map->MarkAsModified(true, GetModifiedTicks());
if (!map->IsWritten()) {
@@ -71,7 +89,7 @@ public:
}
}
- const u64 offset = static_cast<u64>(block->GetOffset(cache_addr));
+ const u64 offset = static_cast<u64>(block->GetOffset(cpu_addr));
return {ToHandle(block), offset};
}
@@ -112,7 +130,7 @@ public:
}
/// Write any cached resources overlapping the specified region back to memory
- void FlushRegion(CacheAddr addr, std::size_t size) {
+ void FlushRegion(VAddr addr, std::size_t size) {
std::lock_guard lock{mutex};
std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -127,7 +145,7 @@ public:
}
/// Mark the specified region as being invalidated
- void InvalidateRegion(CacheAddr addr, u64 size) {
+ void InvalidateRegion(VAddr addr, u64 size) {
std::lock_guard lock{mutex};
std::vector<MapInterval> objects = GetMapsInRange(addr, size);
@@ -152,7 +170,7 @@ protected:
virtual void WriteBarrier() = 0;
- virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0;
+ virtual TBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
const u8* data) = 0;
@@ -169,20 +187,17 @@ protected:
/// Register an object into the cache
void Register(const MapInterval& new_map, bool inherit_written = false) {
- const CacheAddr cache_ptr = new_map->GetStart();
- const std::optional<VAddr> cpu_addr =
- system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress());
- if (!cache_ptr || !cpu_addr) {
+ const VAddr cpu_addr = new_map->GetStart();
+ if (!cpu_addr) {
LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
new_map->GetGpuAddress());
return;
}
const std::size_t size = new_map->GetEnd() - new_map->GetStart();
- new_map->SetCpuAddress(*cpu_addr);
new_map->MarkAsRegistered(true);
const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
mapped_addresses.insert({interval, new_map});
- rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
if (inherit_written) {
MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
new_map->MarkAsWritten(true);
@@ -192,7 +207,7 @@ protected:
/// Unregisters an object from the cache
void Unregister(MapInterval& map) {
const std::size_t size = map->GetEnd() - map->GetStart();
- rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1);
+ rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
map->MarkAsRegistered(false);
if (map->IsWritten()) {
UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
@@ -202,32 +217,39 @@ protected:
}
private:
- MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) {
+ MapInterval CreateMap(const VAddr start, const VAddr end, const GPUVAddr gpu_addr) {
return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
}
- MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr,
- const CacheAddr cache_addr, const std::size_t size) {
+ MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr,
+ const std::size_t size) {
- std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size);
+ std::vector<MapInterval> overlaps = GetMapsInRange(cpu_addr, size);
if (overlaps.empty()) {
- const CacheAddr cache_addr_end = cache_addr + size;
- MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr);
- u8* host_ptr = FromCacheAddr(cache_addr);
- UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr);
+ auto& memory_manager = system.GPU().MemoryManager();
+ const VAddr cpu_addr_end = cpu_addr + size;
+ MapInterval new_map = CreateMap(cpu_addr, cpu_addr_end, gpu_addr);
+ if (memory_manager.IsGranularRange(gpu_addr, size)) {
+ u8* host_ptr = memory_manager.GetPointer(gpu_addr);
+ UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
+ } else {
+ staging_buffer.resize(size);
+ memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
+ UploadBlockData(block, block->GetOffset(cpu_addr), size, staging_buffer.data());
+ }
Register(new_map);
return new_map;
}
- const CacheAddr cache_addr_end = cache_addr + size;
+ const VAddr cpu_addr_end = cpu_addr + size;
if (overlaps.size() == 1) {
MapInterval& current_map = overlaps[0];
- if (current_map->IsInside(cache_addr, cache_addr_end)) {
+ if (current_map->IsInside(cpu_addr, cpu_addr_end)) {
return current_map;
}
}
- CacheAddr new_start = cache_addr;
- CacheAddr new_end = cache_addr_end;
+ VAddr new_start = cpu_addr;
+ VAddr new_end = cpu_addr_end;
bool write_inheritance = false;
bool modified_inheritance = false;
// Calculate new buffer parameters
@@ -237,7 +259,7 @@ private:
write_inheritance |= overlap->IsWritten();
modified_inheritance |= overlap->IsModified();
}
- GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr;
+ GPUVAddr new_gpu_addr = gpu_addr + new_start - cpu_addr;
for (auto& overlap : overlaps) {
Unregister(overlap);
}
@@ -250,7 +272,7 @@ private:
return new_map;
}
- void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end,
+ void UpdateBlock(const TBuffer& block, VAddr start, VAddr end,
std::vector<MapInterval>& overlaps) {
const IntervalType base_interval{start, end};
IntervalSet interval_set{};
@@ -262,13 +284,15 @@ private:
for (auto& interval : interval_set) {
std::size_t size = interval.upper() - interval.lower();
if (size > 0) {
- u8* host_ptr = FromCacheAddr(interval.lower());
- UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr);
+ staging_buffer.resize(size);
+ system.Memory().ReadBlockUnsafe(interval.lower(), staging_buffer.data(), size);
+ UploadBlockData(block, block->GetOffset(interval.lower()), size,
+ staging_buffer.data());
}
}
}
- std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) {
+ std::vector<MapInterval> GetMapsInRange(VAddr addr, std::size_t size) {
if (size == 0) {
return {};
}
@@ -290,8 +314,9 @@ private:
void FlushMap(MapInterval map) {
std::size_t size = map->GetEnd() - map->GetStart();
TBuffer block = blocks[map->GetStart() >> block_page_bits];
- u8* host_ptr = FromCacheAddr(map->GetStart());
- DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr);
+ staging_buffer.resize(size);
+ DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data());
+ system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size);
map->MarkAsModified(false, 0);
}
@@ -316,14 +341,14 @@ private:
TBuffer EnlargeBlock(TBuffer buffer) {
const std::size_t old_size = buffer->GetSize();
const std::size_t new_size = old_size + block_page_size;
- const CacheAddr cache_addr = buffer->GetCacheAddr();
- TBuffer new_buffer = CreateBlock(cache_addr, new_size);
+ const VAddr cpu_addr = buffer->GetCpuAddr();
+ TBuffer new_buffer = CreateBlock(cpu_addr, new_size);
CopyBlock(buffer, new_buffer, 0, 0, old_size);
buffer->SetEpoch(epoch);
pending_destruction.push_back(buffer);
- const CacheAddr cache_addr_end = cache_addr + new_size - 1;
- u64 page_start = cache_addr >> block_page_bits;
- const u64 page_end = cache_addr_end >> block_page_bits;
+ const VAddr cpu_addr_end = cpu_addr + new_size - 1;
+ u64 page_start = cpu_addr >> block_page_bits;
+ const u64 page_end = cpu_addr_end >> block_page_bits;
while (page_start <= page_end) {
blocks[page_start] = new_buffer;
++page_start;
@@ -334,9 +359,9 @@ private:
TBuffer MergeBlocks(TBuffer first, TBuffer second) {
const std::size_t size_1 = first->GetSize();
const std::size_t size_2 = second->GetSize();
- const CacheAddr first_addr = first->GetCacheAddr();
- const CacheAddr second_addr = second->GetCacheAddr();
- const CacheAddr new_addr = std::min(first_addr, second_addr);
+ const VAddr first_addr = first->GetCpuAddr();
+ const VAddr second_addr = second->GetCpuAddr();
+ const VAddr new_addr = std::min(first_addr, second_addr);
const std::size_t new_size = size_1 + size_2;
TBuffer new_buffer = CreateBlock(new_addr, new_size);
CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
@@ -345,9 +370,9 @@ private:
second->SetEpoch(epoch);
pending_destruction.push_back(first);
pending_destruction.push_back(second);
- const CacheAddr cache_addr_end = new_addr + new_size - 1;
+ const VAddr cpu_addr_end = new_addr + new_size - 1;
u64 page_start = new_addr >> block_page_bits;
- const u64 page_end = cache_addr_end >> block_page_bits;
+ const u64 page_end = cpu_addr_end >> block_page_bits;
while (page_start <= page_end) {
blocks[page_start] = new_buffer;
++page_start;
@@ -355,18 +380,18 @@ private:
return new_buffer;
}
- TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) {
+ TBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) {
TBuffer found{};
- const CacheAddr cache_addr_end = cache_addr + size - 1;
- u64 page_start = cache_addr >> block_page_bits;
- const u64 page_end = cache_addr_end >> block_page_bits;
+ const VAddr cpu_addr_end = cpu_addr + size - 1;
+ u64 page_start = cpu_addr >> block_page_bits;
+ const u64 page_end = cpu_addr_end >> block_page_bits;
while (page_start <= page_end) {
auto it = blocks.find(page_start);
if (it == blocks.end()) {
if (found) {
found = EnlargeBlock(found);
} else {
- const CacheAddr start_addr = (page_start << block_page_bits);
+ const VAddr start_addr = (page_start << block_page_bits);
found = CreateBlock(start_addr, block_page_size);
blocks[page_start] = found;
}
@@ -386,7 +411,7 @@ private:
return found;
}
- void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ void MarkRegionAsWritten(const VAddr start, const VAddr end) {
u64 page_start = start >> write_page_bit;
const u64 page_end = end >> write_page_bit;
while (page_start <= page_end) {
@@ -400,7 +425,7 @@ private:
}
}
- void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ void UnmarkRegionAsWritten(const VAddr start, const VAddr end) {
u64 page_start = start >> write_page_bit;
const u64 page_end = end >> write_page_bit;
while (page_start <= page_end) {
@@ -416,7 +441,7 @@ private:
}
}
- bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const {
+ bool IsRegionWritten(const VAddr start, const VAddr end) const {
u64 page_start = start >> write_page_bit;
const u64 page_end = end >> write_page_bit;
while (page_start <= page_end) {
@@ -440,8 +465,8 @@ private:
u64 buffer_offset = 0;
u64 buffer_offset_base = 0;
- using IntervalSet = boost::icl::interval_set<CacheAddr>;
- using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>;
+ using IntervalSet = boost::icl::interval_set<VAddr>;
+ using IntervalCache = boost::icl::interval_map<VAddr, MapInterval>;
using IntervalType = typename IntervalCache::interval_type;
IntervalCache mapped_addresses;
@@ -456,6 +481,8 @@ private:
u64 epoch = 0;
u64 modified_ticks = 0;
+ std::vector<u8> staging_buffer;
+
std::recursive_mutex mutex;
};
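
A pattern this buffer cache rewrite repeats three times (fast constant-buffer uploads, stream uploads, and MapAddress) is: when the GPU range is granular, i.e. backed by one contiguous host mapping, upload directly from GetPointer(); otherwise gather the bytes into the new staging_buffer member with ReadBlockUnsafe(). A condensed sketch of that selection, using the Tegra::MemoryManager calls from this patch (the helper name is hypothetical):

// Hypothetical helper condensing the upload-source selection used above.
const u8* SourceForUpload(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
                          std::size_t size, std::vector<u8>& staging_buffer) {
    if (memory_manager.IsGranularRange(gpu_addr, size)) {
        // Contiguous host backing: read in place, no extra copy.
        return memory_manager.GetPointer(gpu_addr);
    }
    // The range crosses a page boundary: gather a linear copy first.
    staging_buffer.resize(size);
    memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
    return staging_buffer.data();
}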
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index 3a104d5cd..b0956029d 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -11,7 +11,7 @@ namespace VideoCommon {
class MapIntervalBase {
public:
- MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr)
+ MapIntervalBase(const VAddr start, const VAddr end, const GPUVAddr gpu_addr)
: start{start}, end{end}, gpu_addr{gpu_addr} {}
void SetCpuAddress(VAddr new_cpu_addr) {
@@ -26,7 +26,7 @@ public:
return gpu_addr;
}
- bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+ bool IsInside(const VAddr other_start, const VAddr other_end) const {
return (start <= other_start && other_end <= end);
}
@@ -46,11 +46,11 @@ public:
return is_registered;
}
- CacheAddr GetStart() const {
+ VAddr GetStart() const {
return start;
}
- CacheAddr GetEnd() const {
+ VAddr GetEnd() const {
return end;
}
@@ -76,8 +76,8 @@ public:
}
private:
- CacheAddr start;
- CacheAddr end;
+ VAddr start;
+ VAddr end;
GPUVAddr gpu_addr;
VAddr cpu_addr{};
bool is_written{};
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index d24c9f657..5cf6a4cc3 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -303,6 +303,10 @@ public:
return (type == Type::SignedNorm) || (type == Type::UnsignedNorm);
}
+ bool IsConstant() const {
+ return constant;
+ }
+
bool IsValid() const {
return size != Size::Invalid;
}
@@ -312,6 +316,35 @@ public:
}
};
+ struct MsaaSampleLocation {
+ union {
+ BitField<0, 4, u32> x0;
+ BitField<4, 4, u32> y0;
+ BitField<8, 4, u32> x1;
+ BitField<12, 4, u32> y1;
+ BitField<16, 4, u32> x2;
+ BitField<20, 4, u32> y2;
+ BitField<24, 4, u32> x3;
+ BitField<28, 4, u32> y3;
+ };
+
+ constexpr std::pair<u32, u32> Location(int index) const {
+ switch (index) {
+ case 0:
+ return {x0, y0};
+ case 1:
+ return {x1, y1};
+ case 2:
+ return {x2, y2};
+ case 3:
+ return {x3, y3};
+ default:
+ UNREACHABLE();
+ return {0, 0};
+ }
+ }
+ };
+
enum class DepthMode : u32 {
MinusOneToOne = 0,
ZeroToOne = 1,
@@ -793,7 +826,13 @@ public:
u32 rt_separate_frag_data;
- INSERT_UNION_PADDING_WORDS(0xC);
+ INSERT_UNION_PADDING_WORDS(0x1);
+
+ u32 multisample_raster_enable;
+ u32 multisample_raster_samples;
+ std::array<u32, 4> multisample_sample_mask;
+
+ INSERT_UNION_PADDING_WORDS(0x5);
struct {
u32 address_high;
@@ -830,7 +869,16 @@ public:
std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format;
- INSERT_UNION_PADDING_WORDS(0xF);
+ std::array<MsaaSampleLocation, 4> multisample_sample_locations;
+
+ INSERT_UNION_PADDING_WORDS(0x2);
+
+ union {
+ BitField<0, 1, u32> enable;
+ BitField<4, 3, u32> target;
+ } multisample_coverage_to_color;
+
+ INSERT_UNION_PADDING_WORDS(0x8);
struct {
union {
@@ -922,7 +970,10 @@ public:
BitField<4, 1, u32> triangle_rast_flip;
} screen_y_control;
- INSERT_UNION_PADDING_WORDS(0x21);
+ float line_width_smooth;
+ float line_width_aliased;
+
+ INSERT_UNION_PADDING_WORDS(0x1F);
u32 vb_element_base;
u32 vb_base_instance;
@@ -943,7 +994,7 @@ public:
CounterReset counter_reset;
- INSERT_UNION_PADDING_WORDS(0x1);
+ u32 multisample_enable;
u32 zeta_enable;
@@ -980,7 +1031,7 @@ public:
float polygon_offset_factor;
- INSERT_UNION_PADDING_WORDS(0x1);
+ u32 line_smooth_enable;
struct {
u32 tic_address_high;
@@ -1007,7 +1058,11 @@ public:
float polygon_offset_units;
- INSERT_UNION_PADDING_WORDS(0x11);
+ INSERT_UNION_PADDING_WORDS(0x4);
+
+ Tegra::Texture::MsaaMode multisample_mode;
+
+ INSERT_UNION_PADDING_WORDS(0xC);
union {
BitField<2, 1, u32> coord_origin;
@@ -1507,12 +1562,17 @@ ASSERT_REG_POSITION(stencil_back_func_ref, 0x3D5);
ASSERT_REG_POSITION(stencil_back_mask, 0x3D6);
ASSERT_REG_POSITION(stencil_back_func_mask, 0x3D7);
ASSERT_REG_POSITION(color_mask_common, 0x3E4);
-ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB);
ASSERT_REG_POSITION(depth_bounds, 0x3E7);
+ASSERT_REG_POSITION(rt_separate_frag_data, 0x3EB);
+ASSERT_REG_POSITION(multisample_raster_enable, 0x3ED);
+ASSERT_REG_POSITION(multisample_raster_samples, 0x3EE);
+ASSERT_REG_POSITION(multisample_sample_mask, 0x3EF);
ASSERT_REG_POSITION(zeta, 0x3F8);
ASSERT_REG_POSITION(clear_flags, 0x43E);
ASSERT_REG_POSITION(fill_rectangle, 0x44F);
ASSERT_REG_POSITION(vertex_attrib_format, 0x458);
+ASSERT_REG_POSITION(multisample_sample_locations, 0x478);
+ASSERT_REG_POSITION(multisample_coverage_to_color, 0x47E);
ASSERT_REG_POSITION(rt_control, 0x487);
ASSERT_REG_POSITION(zeta_width, 0x48a);
ASSERT_REG_POSITION(zeta_height, 0x48b);
@@ -1538,6 +1598,8 @@ ASSERT_REG_POSITION(stencil_front_func_mask, 0x4E6);
ASSERT_REG_POSITION(stencil_front_mask, 0x4E7);
ASSERT_REG_POSITION(frag_color_clamp, 0x4EA);
ASSERT_REG_POSITION(screen_y_control, 0x4EB);
+ASSERT_REG_POSITION(line_width_smooth, 0x4EC);
+ASSERT_REG_POSITION(line_width_aliased, 0x4ED);
ASSERT_REG_POSITION(vb_element_base, 0x50D);
ASSERT_REG_POSITION(vb_base_instance, 0x50E);
ASSERT_REG_POSITION(clip_distance_enabled, 0x544);
@@ -1545,11 +1607,13 @@ ASSERT_REG_POSITION(samplecnt_enable, 0x545);
ASSERT_REG_POSITION(point_size, 0x546);
ASSERT_REG_POSITION(point_sprite_enable, 0x548);
ASSERT_REG_POSITION(counter_reset, 0x54C);
+ASSERT_REG_POSITION(multisample_enable, 0x54D);
ASSERT_REG_POSITION(zeta_enable, 0x54E);
ASSERT_REG_POSITION(multisample_control, 0x54F);
ASSERT_REG_POSITION(condition, 0x554);
ASSERT_REG_POSITION(tsc, 0x557);
-ASSERT_REG_POSITION(polygon_offset_factor, 0x55b);
+ASSERT_REG_POSITION(polygon_offset_factor, 0x55B);
+ASSERT_REG_POSITION(line_smooth_enable, 0x55C);
ASSERT_REG_POSITION(tic, 0x55D);
ASSERT_REG_POSITION(stencil_two_side_enable, 0x565);
ASSERT_REG_POSITION(stencil_back_op_fail, 0x566);
@@ -1558,6 +1622,7 @@ ASSERT_REG_POSITION(stencil_back_op_zpass, 0x568);
ASSERT_REG_POSITION(stencil_back_func_func, 0x569);
ASSERT_REG_POSITION(framebuffer_srgb, 0x56E);
ASSERT_REG_POSITION(polygon_offset_units, 0x56F);
+ASSERT_REG_POSITION(multisample_mode, 0x574);
ASSERT_REG_POSITION(point_coord_replace, 0x581);
ASSERT_REG_POSITION(code_address, 0x582);
ASSERT_REG_POSITION(draw, 0x585);
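
The reshuffled and newly added ASSERT_REG_POSITION entries above pin each register to its Maxwell method offset, counted in 32-bit words. From memory, the macro defined alongside these asserts in maxwell_3d.h is essentially a static_assert over offsetof (treat the exact message text as approximate):

#define ASSERT_REG_POSITION(field_name, position)                                \
    static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4,         \
                  "Field " #field_name " has invalid position")

So ASSERT_REG_POSITION(multisample_enable, 0x54D) checks that the new multisample_enable member lands at byte offset 0x54D * 4 = 0x1534, which is why every padding word replaced by a named member must be balanced by shrinking the surrounding INSERT_UNION_PADDING_WORDS counts.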
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index a31947ef3..5e9cfba22 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -290,6 +290,23 @@ enum class VmadShr : u64 {
Shr15 = 2,
};
+enum class VmnmxType : u64 {
+ Bits8,
+ Bits16,
+ Bits32,
+};
+
+enum class VmnmxOperation : u64 {
+ Mrg_16H = 0,
+ Mrg_16L = 1,
+ Mrg_8B0 = 2,
+ Mrg_8B2 = 3,
+ Acc = 4,
+ Min = 5,
+ Max = 6,
+ Nop = 7,
+};
+
enum class XmadMode : u64 {
None = 0,
CLo = 1,
@@ -1657,6 +1674,42 @@ union Instruction {
} vmad;
union {
+ BitField<54, 1, u64> is_dest_signed;
+ BitField<48, 1, u64> is_src_a_signed;
+ BitField<49, 1, u64> is_src_b_signed;
+ BitField<37, 2, u64> src_format_a;
+ BitField<29, 2, u64> src_format_b;
+ BitField<56, 1, u64> mx;
+ BitField<55, 1, u64> sat;
+ BitField<36, 2, u64> selector_a;
+ BitField<28, 2, u64> selector_b;
+ BitField<50, 1, u64> is_op_b_register;
+ BitField<51, 3, VmnmxOperation> operation;
+
+ VmnmxType SourceFormatA() const {
+ switch (src_format_a) {
+ case 0b11:
+ return VmnmxType::Bits32;
+ case 0b10:
+ return VmnmxType::Bits16;
+ default:
+ return VmnmxType::Bits8;
+ }
+ }
+
+ VmnmxType SourceFormatB() const {
+ switch (src_format_b) {
+ case 0b11:
+ return VmnmxType::Bits32;
+ case 0b10:
+ return VmnmxType::Bits16;
+ default:
+ return VmnmxType::Bits8;
+ }
+ }
+ } vmnmx;
+
+ union {
BitField<20, 16, u64> imm20_16;
BitField<35, 1, u64> high_b_rr; // used on RR
BitField<36, 1, u64> product_shift_left;
@@ -1718,6 +1771,7 @@ public:
BRK,
DEPBAR,
VOTE,
+ VOTE_VTG,
SHFL,
FSWZADD,
BFE_C,
@@ -1765,9 +1819,11 @@ public:
IPA,
OUT_R, // Emit vertex/primitive
ISBERD,
+ BAR,
MEMBAR,
VMAD,
VSETP,
+ VMNMX,
FFMA_IMM, // Fused Multiply and Add
FFMA_CR,
FFMA_RC,
@@ -1822,7 +1878,8 @@ public:
ICMP_R,
ICMP_CR,
ICMP_IMM,
- FCMP_R,
+ FCMP_RR,
+ FCMP_RC,
MUFU, // Multi-Function Operator
RRO_C, // Range Reduction Operator
RRO_R,
@@ -1849,7 +1906,7 @@ public:
MOV_C,
MOV_R,
MOV_IMM,
- MOV_SYS,
+ S2R,
MOV32_IMM,
SHL_C,
SHL_R,
@@ -2033,6 +2090,7 @@ private:
INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"),
INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"),
+ INST("0101000011100---", Id::VOTE_VTG, Type::Warp, "VOTE_VTG"),
INST("1110111100010---", Id::SHFL, Type::Warp, "SHFL"),
INST("0101000011111---", Id::FSWZADD, Type::Warp, "FSWZADD"),
INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
@@ -2071,9 +2129,11 @@ private:
INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
+ INST("1111000010101---", Id::BAR, Type::Trivial, "BAR"),
INST("1110111110011---", Id::MEMBAR, Type::Trivial, "MEMBAR"),
INST("01011111--------", Id::VMAD, Type::Video, "VMAD"),
INST("0101000011110---", Id::VSETP, Type::Video, "VSETP"),
+ INST("0011101---------", Id::VMNMX, Type::Video, "VMNMX"),
INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"),
INST("010010011-------", Id::FFMA_CR, Type::Ffma, "FFMA_CR"),
INST("010100011-------", Id::FFMA_RC, Type::Ffma, "FFMA_RC"),
@@ -2128,7 +2188,8 @@ private:
INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"),
INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"),
INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"),
- INST("010110111010----", Id::FCMP_R, Type::Arithmetic, "FCMP_R"),
+ INST("010110111010----", Id::FCMP_RR, Type::Arithmetic, "FCMP_RR"),
+ INST("010010111010----", Id::FCMP_RC, Type::Arithmetic, "FCMP_RC"),
INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"),
INST("0100110010010---", Id::RRO_C, Type::Arithmetic, "RRO_C"),
INST("0101110010010---", Id::RRO_R, Type::Arithmetic, "RRO_R"),
@@ -2142,7 +2203,7 @@ private:
INST("0100110010011---", Id::MOV_C, Type::Arithmetic, "MOV_C"),
INST("0101110010011---", Id::MOV_R, Type::Arithmetic, "MOV_R"),
INST("0011100-10011---", Id::MOV_IMM, Type::Arithmetic, "MOV_IMM"),
- INST("1111000011001---", Id::MOV_SYS, Type::Trivial, "MOV_SYS"),
+ INST("1111000011001---", Id::S2R, Type::Trivial, "S2R"),
INST("000000010000----", Id::MOV32_IMM, Type::ArithmeticImmediate, "MOV32_IMM"),
INST("0100110001100---", Id::FMNMX_C, Type::Arithmetic, "FMNMX_C"),
INST("0101110001100---", Id::FMNMX_R, Type::Arithmetic, "FMNMX_R"),
@@ -2174,7 +2235,7 @@ private:
INST("0011011-11111---", Id::SHF_LEFT_IMM, Type::Shift, "SHF_LEFT_IMM"),
INST("0100110011100---", Id::I2I_C, Type::Conversion, "I2I_C"),
INST("0101110011100---", Id::I2I_R, Type::Conversion, "I2I_R"),
- INST("0011101-11100---", Id::I2I_IMM, Type::Conversion, "I2I_IMM"),
+ INST("0011100-11100---", Id::I2I_IMM, Type::Conversion, "I2I_IMM"),
INST("0100110010111---", Id::I2F_C, Type::Conversion, "I2F_C"),
INST("0101110010111---", Id::I2F_R, Type::Conversion, "I2F_R"),
INST("0011100-10111---", Id::I2F_IMM, Type::Conversion, "I2F_IMM"),
diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h
index bc80661d8..72e2a33d5 100644
--- a/src/video_core/engines/shader_header.h
+++ b/src/video_core/engines/shader_header.h
@@ -4,6 +4,9 @@
#pragma once
+#include <array>
+#include <optional>
+
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
@@ -16,7 +19,7 @@ enum class OutputTopology : u32 {
TriangleStrip = 7,
};
-enum class AttributeUse : u8 {
+enum class PixelImap : u8 {
Unused = 0,
Constant = 1,
Perspective = 2,
@@ -24,7 +27,7 @@ enum class AttributeUse : u8 {
};
// Documentation in:
-// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html#ImapTexture
+// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html
struct Header {
union {
BitField<0, 5, u32> sph_type;
@@ -59,8 +62,8 @@ struct Header {
union {
BitField<0, 12, u32> max_output_vertices;
BitField<12, 8, u32> store_req_start; // NOTE: not used by geometry shaders.
- BitField<24, 4, u32> reserved;
- BitField<12, 8, u32> store_req_end; // NOTE: not used by geometry shaders.
+ BitField<20, 4, u32> reserved;
+ BitField<24, 8, u32> store_req_end; // NOTE: not used by geometry shaders.
} common4{};
union {
@@ -93,17 +96,20 @@ struct Header {
struct {
INSERT_UNION_PADDING_BYTES(3); // ImapSystemValuesA
INSERT_UNION_PADDING_BYTES(1); // ImapSystemValuesB
+
union {
- BitField<0, 2, AttributeUse> x;
- BitField<2, 2, AttributeUse> y;
- BitField<4, 2, AttributeUse> w;
- BitField<6, 2, AttributeUse> z;
+ BitField<0, 2, PixelImap> x;
+ BitField<2, 2, PixelImap> y;
+ BitField<4, 2, PixelImap> z;
+ BitField<6, 2, PixelImap> w;
u8 raw;
} imap_generic_vector[32];
+
INSERT_UNION_PADDING_BYTES(2); // ImapColor
INSERT_UNION_PADDING_BYTES(2); // ImapSystemValuesC
INSERT_UNION_PADDING_BYTES(10); // ImapFixedFncTexture[10]
INSERT_UNION_PADDING_BYTES(2); // ImapReserved
+
struct {
u32 target;
union {
@@ -112,31 +118,30 @@ struct Header {
BitField<2, 30, u32> reserved;
};
} omap;
+
bool IsColorComponentOutputEnabled(u32 render_target, u32 component) const {
const u32 bit = render_target * 4 + component;
return omap.target & (1 << bit);
}
- AttributeUse GetAttributeIndexUse(u32 attribute, u32 index) const {
- return static_cast<AttributeUse>(
- (imap_generic_vector[attribute].raw >> (index * 2)) & 0x03);
- }
- AttributeUse GetAttributeUse(u32 attribute) const {
- AttributeUse result = AttributeUse::Unused;
- for (u32 i = 0; i < 4; i++) {
- const auto index = GetAttributeIndexUse(attribute, i);
- if (index == AttributeUse::Unused) {
- continue;
- }
- if (result == AttributeUse::Unused || result == index) {
- result = index;
+
+ PixelImap GetPixelImap(u32 attribute) const {
+ const auto get_index = [this, attribute](u32 index) {
+ return static_cast<PixelImap>(
+ (imap_generic_vector[attribute].raw >> (index * 2)) & 3);
+ };
+
+ std::optional<PixelImap> result;
+ for (u32 component = 0; component < 4; ++component) {
+ const PixelImap index = get_index(component);
+ if (index == PixelImap::Unused) {
continue;
}
- LOG_CRITICAL(HW_GPU, "Generic Attribute Conflict in Interpolation Mode");
- if (index == AttributeUse::Perspective) {
- result = index;
+ if (result && result != index) {
+ LOG_CRITICAL(HW_GPU, "Generic attribute conflict in interpolation mode");
}
+ result = index;
}
- return result;
+ return result.value_or(PixelImap::Unused);
}
} ps;
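
For reference, each imap_generic_vector entry packs four 2-bit PixelImap values into a single byte; that is what the get_index lambda above decodes. An illustrative standalone decode (the enumerators mirror the header's; ScreenLinear = 3 sits outside the hunk shown):

enum class PixelImap : unsigned { Unused = 0, Constant = 1, Perspective = 2, ScreenLinear = 3 };

// Extract the 2-bit interpolation mode of component `index` (0 = x .. 3 = w).
constexpr PixelImap Component(unsigned raw, unsigned index) {
    return static_cast<PixelImap>((raw >> (index * 2)) & 3);
}

static_assert(Component(0b10101010, 0) == PixelImap::Perspective); // all four components
static_assert(Component(0b10101010, 3) == PixelImap::Perspective); // use perspective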
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index ced9d7e28..1a2d747be 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -270,13 +270,13 @@ public:
virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
- virtual void FlushRegion(CacheAddr addr, u64 size) = 0;
+ virtual void FlushRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be invalidated
- virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0;
+ virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated
- virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+ virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
protected:
virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0;
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index 925be8d7b..20e73a37e 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -12,8 +12,9 @@ namespace VideoCommon {
GPUAsynch::GPUAsynch(Core::System& system, std::unique_ptr<VideoCore::RendererBase>&& renderer_,
std::unique_ptr<Core::Frontend::GraphicsContext>&& context)
- : GPU(system, std::move(renderer_), true), gpu_thread{system}, gpu_context(std::move(context)),
- cpu_context(renderer->GetRenderWindow().CreateSharedContext()) {}
+ : GPU(system, std::move(renderer_), true), gpu_thread{system},
+ cpu_context(renderer->GetRenderWindow().CreateSharedContext()),
+ gpu_context(std::move(context)) {}
GPUAsynch::~GPUAsynch() = default;
@@ -30,15 +31,15 @@ void GPUAsynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
gpu_thread.SwapBuffers(framebuffer);
}
-void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) {
+void GPUAsynch::FlushRegion(VAddr addr, u64 size) {
gpu_thread.FlushRegion(addr, size);
}
-void GPUAsynch::InvalidateRegion(CacheAddr addr, u64 size) {
+void GPUAsynch::InvalidateRegion(VAddr addr, u64 size) {
gpu_thread.InvalidateRegion(addr, size);
}
-void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+void GPUAsynch::FlushAndInvalidateRegion(VAddr addr, u64 size) {
gpu_thread.FlushAndInvalidateRegion(addr, size);
}
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 265c62758..03fd0eef0 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -27,9 +27,9 @@ public:
void Start() override;
void PushGPUEntries(Tegra::CommandList&& entries) override;
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
- void FlushRegion(CacheAddr addr, u64 size) override;
- void InvalidateRegion(CacheAddr addr, u64 size) override;
- void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void WaitIdle() const override;
protected:
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
index bd5278a5c..6f38a672a 100644
--- a/src/video_core/gpu_synch.cpp
+++ b/src/video_core/gpu_synch.cpp
@@ -26,15 +26,15 @@ void GPUSynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
renderer->SwapBuffers(framebuffer);
}
-void GPUSynch::FlushRegion(CacheAddr addr, u64 size) {
+void GPUSynch::FlushRegion(VAddr addr, u64 size) {
renderer->Rasterizer().FlushRegion(addr, size);
}
-void GPUSynch::InvalidateRegion(CacheAddr addr, u64 size) {
+void GPUSynch::InvalidateRegion(VAddr addr, u64 size) {
renderer->Rasterizer().InvalidateRegion(addr, size);
}
-void GPUSynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+void GPUSynch::FlushAndInvalidateRegion(VAddr addr, u64 size) {
renderer->Rasterizer().FlushAndInvalidateRegion(addr, size);
}
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
index 866a94c8c..4a6e9a01d 100644
--- a/src/video_core/gpu_synch.h
+++ b/src/video_core/gpu_synch.h
@@ -26,9 +26,9 @@ public:
void Start() override;
void PushGPUEntries(Tegra::CommandList&& entries) override;
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
- void FlushRegion(CacheAddr addr, u64 size) override;
- void InvalidateRegion(CacheAddr addr, u64 size) override;
- void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void WaitIdle() const override {}
protected:
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 270c7ae0d..10cda686b 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -77,15 +77,15 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
PushCommand(SwapBuffersCommand(framebuffer ? std::make_optional(*framebuffer) : std::nullopt));
}
-void ThreadManager::FlushRegion(CacheAddr addr, u64 size) {
+void ThreadManager::FlushRegion(VAddr addr, u64 size) {
PushCommand(FlushRegionCommand(addr, size));
}
-void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) {
+void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
system.Renderer().Rasterizer().InvalidateRegion(addr, size);
}
-void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
// Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important
InvalidateRegion(addr, size);
}
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index be36c580e..cd74ad330 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -47,26 +47,26 @@ struct SwapBuffersCommand final {
/// Command to signal to the GPU thread to flush a region
struct FlushRegionCommand final {
- explicit constexpr FlushRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {}
+ explicit constexpr FlushRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
- CacheAddr addr;
+ VAddr addr;
u64 size;
};
/// Command to signal to the GPU thread to invalidate a region
struct InvalidateRegionCommand final {
- explicit constexpr InvalidateRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {}
+ explicit constexpr InvalidateRegionCommand(VAddr addr, u64 size) : addr{addr}, size{size} {}
- CacheAddr addr;
+ VAddr addr;
u64 size;
};
/// Command to signal to the GPU thread to flush and invalidate a region
struct FlushAndInvalidateRegionCommand final {
- explicit constexpr FlushAndInvalidateRegionCommand(CacheAddr addr, u64 size)
+ explicit constexpr FlushAndInvalidateRegionCommand(VAddr addr, u64 size)
: addr{addr}, size{size} {}
- CacheAddr addr;
+ VAddr addr;
u64 size;
};
@@ -111,13 +111,13 @@ public:
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
- void FlushRegion(CacheAddr addr, u64 size);
+ void FlushRegion(VAddr addr, u64 size);
/// Notify rasterizer that any caches of the specified region should be invalidated
- void InvalidateRegion(CacheAddr addr, u64 size);
+ void InvalidateRegion(VAddr addr, u64 size);
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated
- void FlushAndInvalidateRegion(CacheAddr addr, u64 size);
+ void FlushAndInvalidateRegion(VAddr addr, u64 size);
// Wait until the gpu thread is idle.
void WaitIdle() const;
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index f5d33f27a..a3389d0d2 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -81,12 +81,11 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
ASSERT((gpu_addr & page_mask) == 0);
const u64 aligned_size{Common::AlignUp(size, page_size)};
- const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))};
const auto cpu_addr = GpuToCpuAddress(gpu_addr);
ASSERT(cpu_addr);
// Flush and invalidate through the GPU interface, to be asynchronous if possible.
- system.GPU().FlushAndInvalidateRegion(cache_addr, aligned_size);
+ system.GPU().FlushAndInvalidateRegion(*cpu_addr, aligned_size);
UnmapRange(gpu_addr, aligned_size);
ASSERT(system.CurrentProcess()
@@ -140,11 +139,11 @@ T MemoryManager::Read(GPUVAddr addr) const {
return {};
}
- const u8* page_pointer{page_table.pointers[addr >> page_bits]};
+ const u8* page_pointer{GetPointer(addr)};
if (page_pointer) {
// NOTE: Avoid adding any extra logic to this fast-path block
T value;
- std::memcpy(&value, &page_pointer[addr & page_mask], sizeof(T));
+ std::memcpy(&value, page_pointer, sizeof(T));
return value;
}
@@ -167,10 +166,10 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
return;
}
- u8* page_pointer{page_table.pointers[addr >> page_bits]};
+ u8* page_pointer{GetPointer(addr)};
if (page_pointer) {
// NOTE: Avoid adding any extra logic to this fast-path block
- std::memcpy(&page_pointer[addr & page_mask], &data, sizeof(T));
+ std::memcpy(page_pointer, &data, sizeof(T));
return;
}
@@ -201,9 +200,12 @@ u8* MemoryManager::GetPointer(GPUVAddr addr) {
return {};
}
- u8* const page_pointer{page_table.pointers[addr >> page_bits]};
- if (page_pointer != nullptr) {
- return page_pointer + (addr & page_mask);
+ auto& memory = system.Memory();
+
+ const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
+
+ if (page_addr != 0) {
+ return memory.GetPointer(page_addr + (addr & page_mask));
}
LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
@@ -215,9 +217,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
return {};
}
- const u8* const page_pointer{page_table.pointers[addr >> page_bits]};
- if (page_pointer != nullptr) {
- return page_pointer + (addr & page_mask);
+ const auto& memory = system.Memory();
+
+ const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
+
+ if (page_addr != 0) {
+ return memory.GetPointer(page_addr + (addr & page_mask));
}
LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
@@ -238,17 +243,19 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
std::size_t page_index{src_addr >> page_bits};
std::size_t page_offset{src_addr & page_mask};
+ auto& memory = system.Memory();
+
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
switch (page_table.attributes[page_index]) {
case Common::PageType::Memory: {
- const u8* src_ptr{page_table.pointers[page_index] + page_offset};
+ const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
// Flush must happen on the rasterizer interface, such that memory is always synchronous
// when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
- rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
- std::memcpy(dest_buffer, src_ptr, copy_amount);
+ rasterizer.FlushRegion(src_addr, copy_amount);
+ memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
break;
}
default:
@@ -268,13 +275,15 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer,
std::size_t page_index{src_addr >> page_bits};
std::size_t page_offset{src_addr & page_mask};
+ auto& memory = system.Memory();
+
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
const u8* page_pointer = page_table.pointers[page_index];
if (page_pointer) {
- const u8* src_ptr{page_pointer + page_offset};
- std::memcpy(dest_buffer, src_ptr, copy_amount);
+ const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
+ memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
} else {
std::memset(dest_buffer, 0, copy_amount);
}
@@ -290,17 +299,19 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
std::size_t page_index{dest_addr >> page_bits};
std::size_t page_offset{dest_addr & page_mask};
+ auto& memory = system.Memory();
+
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
switch (page_table.attributes[page_index]) {
case Common::PageType::Memory: {
- u8* dest_ptr{page_table.pointers[page_index] + page_offset};
+ const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
// Invalidate must happen on the rasterizer interface, such that memory is always
// synchronous when it is written (even when in asynchronous GPU mode).
- rasterizer.InvalidateRegion(ToCacheAddr(dest_ptr), copy_amount);
- std::memcpy(dest_ptr, src_buffer, copy_amount);
+ rasterizer.InvalidateRegion(dest_addr, copy_amount);
+ memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
break;
}
default:
@@ -320,13 +331,15 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
std::size_t page_index{dest_addr >> page_bits};
std::size_t page_offset{dest_addr & page_mask};
+ auto& memory = system.Memory();
+
while (remaining_size > 0) {
const std::size_t copy_amount{
std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
u8* page_pointer = page_table.pointers[page_index];
if (page_pointer) {
- u8* dest_ptr{page_pointer + page_offset};
- std::memcpy(dest_ptr, src_buffer, copy_amount);
+ const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
+ memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
}
page_index++;
page_offset = 0;
@@ -336,33 +349,9 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
}
void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
- std::size_t remaining_size{size};
- std::size_t page_index{src_addr >> page_bits};
- std::size_t page_offset{src_addr & page_mask};
-
- while (remaining_size > 0) {
- const std::size_t copy_amount{
- std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
- switch (page_table.attributes[page_index]) {
- case Common::PageType::Memory: {
- // Flush must happen on the rasterizer interface, such that memory is always synchronous
- // when it is copied (even when in asynchronous GPU mode).
- const u8* src_ptr{page_table.pointers[page_index] + page_offset};
- rasterizer.FlushRegion(ToCacheAddr(src_ptr), copy_amount);
- WriteBlock(dest_addr, src_ptr, copy_amount);
- break;
- }
- default:
- UNREACHABLE();
- }
-
- page_index++;
- page_offset = 0;
- dest_addr += static_cast<VAddr>(copy_amount);
- src_addr += static_cast<VAddr>(copy_amount);
- remaining_size -= copy_amount;
- }
+ std::vector<u8> tmp_buffer(size);
+ ReadBlock(src_addr, tmp_buffer.data(), size);
+ WriteBlock(dest_addr, tmp_buffer.data(), size);
}
void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
@@ -371,6 +360,12 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const
WriteBlockUnsafe(dest_addr, tmp_buffer.data(), size);
}
+bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
+ const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
+ const std::size_t page = (addr & Memory::PAGE_MASK) + size;
+ return page <= Memory::PAGE_SIZE;
+}
+
void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
VAddr backing_addr) {
LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 073bdb491..0d9468535 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -97,6 +97,11 @@ public:
void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
+ /**
+ * IsGranularRange checks if a gpu region can be simply read with a pointer
+ */
+ bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size);
+
private:
using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
using VMAHandle = VMAMap::const_iterator;
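
After translating the GPU address to its backing VAddr, IsGranularRange's test, (addr & Memory::PAGE_MASK) + size <= Memory::PAGE_SIZE, amounts to checking that the range never crosses a host page boundary. A standalone worked version, assuming the 4 KiB value of Memory::PAGE_SIZE:

#include <cstddef>
#include <cstdint>

constexpr std::size_t PAGE_SIZE = 0x1000; // assumed value of Memory::PAGE_SIZE
constexpr std::size_t PAGE_MASK = PAGE_SIZE - 1;

constexpr bool IsGranular(std::uint64_t addr, std::size_t size) {
    // Offset inside the first page plus the length must still fit in that page.
    return (addr & PAGE_MASK) + size <= PAGE_SIZE;
}

static_assert(IsGranular(0x1000, 0x1000));  // exactly one full page
static_assert(!IsGranular(0x1FFF, 2));      // two bytes straddling a boundary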
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index e66054ed0..5ea2b01f2 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -98,12 +98,12 @@ public:
static_cast<QueryCache&>(*this),
VideoCore::QueryType::SamplesPassed}}} {}
- void InvalidateRegion(CacheAddr addr, std::size_t size) {
+ void InvalidateRegion(VAddr addr, std::size_t size) {
std::unique_lock lock{mutex};
FlushAndRemoveRegion(addr, size);
}
- void FlushRegion(CacheAddr addr, std::size_t size) {
+ void FlushRegion(VAddr addr, std::size_t size) {
std::unique_lock lock{mutex};
FlushAndRemoveRegion(addr, size);
}
@@ -117,14 +117,16 @@ public:
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
std::unique_lock lock{mutex};
auto& memory_manager = system.GPU().MemoryManager();
- const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+ const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr);
+ ASSERT(cpu_addr_opt);
+ VAddr cpu_addr = *cpu_addr_opt;
- CachedQuery* query = TryGet(ToCacheAddr(host_ptr));
+ CachedQuery* query = TryGet(cpu_addr);
if (!query) {
- const auto cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
- ASSERT_OR_EXECUTE(cpu_addr, return;);
+ ASSERT_OR_EXECUTE(cpu_addr_opt, return;);
+ const auto host_ptr = memory_manager.GetPointer(gpu_addr);
- query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
+ query = Register(type, cpu_addr, host_ptr, timestamp.has_value());
}
query->BindCounter(Stream(type).Current(), timestamp);
@@ -173,11 +175,11 @@ protected:
private:
/// Flushes a memory range to guest memory and removes it from the cache.
- void FlushAndRemoveRegion(CacheAddr addr, std::size_t size) {
+ void FlushAndRemoveRegion(VAddr addr, std::size_t size) {
const u64 addr_begin = static_cast<u64>(addr);
const u64 addr_end = addr_begin + static_cast<u64>(size);
const auto in_range = [addr_begin, addr_end](CachedQuery& query) {
- const u64 cache_begin = query.GetCacheAddr();
+ const u64 cache_begin = query.GetCpuAddr();
const u64 cache_end = cache_begin + query.SizeInBytes();
return cache_begin < addr_end && addr_begin < cache_end;
};
@@ -193,7 +195,7 @@ private:
if (!in_range(query)) {
continue;
}
- rasterizer.UpdatePagesCachedCount(query.CpuAddr(), query.SizeInBytes(), -1);
+ rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
query.Flush();
}
contents.erase(std::remove_if(std::begin(contents), std::end(contents), in_range),
@@ -204,22 +206,21 @@ private:
/// Registers the passed parameters as cached and returns a pointer to the stored cached query.
CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
- const u64 page = static_cast<u64>(ToCacheAddr(host_ptr)) >> PAGE_SHIFT;
+ const u64 page = static_cast<u64>(cpu_addr) >> PAGE_SHIFT;
return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
host_ptr);
}
/// Tries to a get a cached query. Returns nullptr on failure.
- CachedQuery* TryGet(CacheAddr addr) {
+ CachedQuery* TryGet(VAddr addr) {
const u64 page = static_cast<u64>(addr) >> PAGE_SHIFT;
const auto it = cached_queries.find(page);
if (it == std::end(cached_queries)) {
return nullptr;
}
auto& contents = it->second;
- const auto found =
- std::find_if(std::begin(contents), std::end(contents),
- [addr](auto& query) { return query.GetCacheAddr() == addr; });
+ const auto found = std::find_if(std::begin(contents), std::end(contents),
+ [addr](auto& query) { return query.GetCpuAddr() == addr; });
return found != std::end(contents) ? &*found : nullptr;
}
@@ -323,14 +324,10 @@ public:
timestamp = timestamp_;
}
- VAddr CpuAddr() const noexcept {
+ VAddr GetCpuAddr() const noexcept {
return cpu_addr;
}
- CacheAddr GetCacheAddr() const noexcept {
- return ToCacheAddr(host_ptr);
- }
-
u64 SizeInBytes() const noexcept {
return SizeInBytes(timestamp.has_value());
}
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index 6de1597a2..22987751e 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -18,22 +18,14 @@
class RasterizerCacheObject {
public:
- explicit RasterizerCacheObject(const u8* host_ptr)
- : host_ptr{host_ptr}, cache_addr{ToCacheAddr(host_ptr)} {}
+ explicit RasterizerCacheObject(const VAddr cpu_addr) : cpu_addr{cpu_addr} {}
virtual ~RasterizerCacheObject();
- CacheAddr GetCacheAddr() const {
- return cache_addr;
+ VAddr GetCpuAddr() const {
+ return cpu_addr;
}
- const u8* GetHostPtr() const {
- return host_ptr;
- }
-
- /// Gets the address of the shader in guest memory, required for cache management
- virtual VAddr GetCpuAddr() const = 0;
-
/// Gets the size of the shader in guest memory, required for cache management
virtual std::size_t GetSizeInBytes() const = 0;
@@ -68,8 +60,7 @@ private:
bool is_registered{}; ///< Whether the object is currently registered with the cache
bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory)
u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing
- const u8* host_ptr{}; ///< Pointer to the memory backing this cached region
- CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space
+ VAddr cpu_addr{}; ///< CPU address of the backing memory in the emulated virtual address space
};
template <class T>
@@ -80,7 +71,7 @@ public:
explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {}
/// Write any cached resources overlapping the specified region back to memory
- void FlushRegion(CacheAddr addr, std::size_t size) {
+ void FlushRegion(VAddr addr, std::size_t size) {
std::lock_guard lock{mutex};
const auto& objects{GetSortedObjectsFromRegion(addr, size)};
@@ -90,7 +81,7 @@ public:
}
/// Mark the specified region as being invalidated
- void InvalidateRegion(CacheAddr addr, u64 size) {
+ void InvalidateRegion(VAddr addr, u64 size) {
std::lock_guard lock{mutex};
const auto& objects{GetSortedObjectsFromRegion(addr, size)};
@@ -114,27 +105,20 @@ public:
protected:
/// Tries to get an object from the cache with the specified CPU address
- T TryGet(CacheAddr addr) const {
+ T TryGet(VAddr addr) const {
const auto iter = map_cache.find(addr);
if (iter != map_cache.end())
return iter->second;
return nullptr;
}
- T TryGet(const void* addr) const {
- const auto iter = map_cache.find(ToCacheAddr(addr));
- if (iter != map_cache.end())
- return iter->second;
- return nullptr;
- }
-
/// Register an object into the cache
virtual void Register(const T& object) {
std::lock_guard lock{mutex};
object->SetIsRegistered(true);
interval_cache.add({GetInterval(object), ObjectSet{object}});
- map_cache.insert({object->GetCacheAddr(), object});
+ map_cache.insert({object->GetCpuAddr(), object});
rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), 1);
}
@@ -144,7 +128,7 @@ protected:
object->SetIsRegistered(false);
rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
- const CacheAddr addr = object->GetCacheAddr();
+ const VAddr addr = object->GetCpuAddr();
interval_cache.subtract({GetInterval(object), ObjectSet{object}});
map_cache.erase(addr);
}
@@ -173,7 +157,7 @@ protected:
private:
/// Returns a list of cached objects from the specified memory region, ordered by access time
- std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) {
+ std::vector<T> GetSortedObjectsFromRegion(VAddr addr, u64 size) {
if (size == 0) {
return {};
}
@@ -197,13 +181,13 @@ private:
}
using ObjectSet = std::set<T>;
- using ObjectCache = std::unordered_map<CacheAddr, T>;
- using IntervalCache = boost::icl::interval_map<CacheAddr, ObjectSet>;
+ using ObjectCache = std::unordered_map<VAddr, T>;
+ using IntervalCache = boost::icl::interval_map<VAddr, ObjectSet>;
using ObjectInterval = typename IntervalCache::interval_type;
static auto GetInterval(const T& object) {
- return ObjectInterval::right_open(object->GetCacheAddr(),
- object->GetCacheAddr() + object->GetSizeInBytes());
+ return ObjectInterval::right_open(object->GetCpuAddr(),
+ object->GetCpuAddr() + object->GetSizeInBytes());
}
ObjectCache map_cache;
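For readers unfamiliar with boost::icl: interval_map splits overlapping insertions and aggregates their payloads, which is exactly what lets InvalidateRegion find every object touching a range. A self-contained sketch (the key and payload types are illustrative, not the cache's own):

#include <set>
#include <boost/icl/interval_map.hpp>

using ExampleMap = boost::icl::interval_map<unsigned long long, std::set<int>>;

int main() {
    ExampleMap map;
    map.add({ExampleMap::interval_type::right_open(0x1000, 0x3000), {1}});
    map.add({ExampleMap::interval_type::right_open(0x2000, 0x4000), {2}});
    // The map now holds three segments; [0x2000, 0x3000) is associated with {1, 2},
    // so a query over that range sees both cached objects.
}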
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 1a68e3caa..8ae5b9c4e 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -53,14 +53,14 @@ public:
virtual void FlushAll() = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
- virtual void FlushRegion(CacheAddr addr, u64 size) = 0;
+ virtual void FlushRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be invalidated
- virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0;
+ virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
- virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+ virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
/// Notify the rasterizer to send all written commands to the host GPU.
virtual void FlushCommands() = 0;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 0375fca17..4eb37a96c 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -21,8 +21,8 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
-CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size)
- : VideoCommon::BufferBlock{cache_addr, size} {
+CachedBufferBlock::CachedBufferBlock(VAddr cpu_addr, const std::size_t size)
+ : VideoCommon::BufferBlock{cpu_addr, size} {
gl_buffer.Create();
glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
}
@@ -47,8 +47,8 @@ OGLBufferCache::~OGLBufferCache() {
glDeleteBuffers(static_cast<GLsizei>(std::size(cbufs)), std::data(cbufs));
}
-Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
- return std::make_shared<CachedBufferBlock>(cache_addr, size);
+Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+ return std::make_shared<CachedBufferBlock>(cpu_addr, size);
}
void OGLBufferCache::WriteBarrier() {
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 8c7145443..d94a11252 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -31,7 +31,7 @@ using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuf
class CachedBufferBlock : public VideoCommon::BufferBlock {
public:
- explicit CachedBufferBlock(CacheAddr cache_addr, const std::size_t size);
+ explicit CachedBufferBlock(VAddr cpu_addr, const std::size_t size);
~CachedBufferBlock();
const GLuint* GetHandle() const {
@@ -55,7 +55,7 @@ public:
}
protected:
- Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+ Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
void WriteBarrier() override;
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 1a2e2a9f7..c286502ba 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -131,6 +131,31 @@ std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindin
return bindings;
}
+bool IsASTCSupported() {
+ static constexpr std::array formats = {
+ GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_COMPRESSED_RGBA_ASTC_5x4_KHR,
+ GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_COMPRESSED_RGBA_ASTC_6x5_KHR,
+ GL_COMPRESSED_RGBA_ASTC_6x6_KHR, GL_COMPRESSED_RGBA_ASTC_8x5_KHR,
+ GL_COMPRESSED_RGBA_ASTC_8x6_KHR, GL_COMPRESSED_RGBA_ASTC_8x8_KHR,
+ GL_COMPRESSED_RGBA_ASTC_10x5_KHR, GL_COMPRESSED_RGBA_ASTC_10x6_KHR,
+ GL_COMPRESSED_RGBA_ASTC_10x8_KHR, GL_COMPRESSED_RGBA_ASTC_10x10_KHR,
+ GL_COMPRESSED_RGBA_ASTC_12x10_KHR, GL_COMPRESSED_RGBA_ASTC_12x12_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR,
+ GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR,
+ };
+ return std::find_if_not(formats.begin(), formats.end(), [](GLenum format) {
+ GLint supported;
+ glGetInternalformativ(GL_TEXTURE_2D, format, GL_INTERNALFORMAT_SUPPORTED, 1,
+ &supported);
+ return supported == GL_TRUE;
+ }) == formats.end();
+}
+
} // Anonymous namespace
Device::Device() : base_bindings{BuildBaseBindings()} {
@@ -152,6 +177,7 @@ Device::Device() : base_bindings{BuildBaseBindings()} {
has_shader_ballot = GLAD_GL_ARB_shader_ballot;
has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array;
has_image_load_formatted = HasExtension(extensions, "GL_EXT_shader_image_load_formatted");
+ has_astc = IsASTCSupported();
has_variable_aoffi = TestVariableAoffi();
has_component_indexing_bug = is_amd;
has_precise_bug = TestPreciseBug();
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index d73b099d0..a55050cb5 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -64,6 +64,10 @@ public:
return has_image_load_formatted;
}
+ bool HasASTC() const {
+ return has_astc;
+ }
+
bool HasVariableAoffi() const {
return has_variable_aoffi;
}
@@ -97,6 +101,7 @@ private:
bool has_shader_ballot{};
bool has_vertex_viewport_layer{};
bool has_image_load_formatted{};
+ bool has_astc{};
bool has_variable_aoffi{};
bool has_component_indexing_bug{};
bool has_precise_bug{};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 31add708f..f4598fbf7 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -140,8 +140,8 @@ void RasterizerOpenGL::SetupVertexFormat() {
const auto attrib = gpu.regs.vertex_attrib_format[index];
const auto gl_index = static_cast<GLuint>(index);
- // Ignore invalid attributes.
- if (!attrib.IsValid()) {
+ // Disable constant attributes.
+ if (attrib.IsConstant()) {
glDisableVertexAttribArray(gl_index);
continue;
}
@@ -345,7 +345,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
texture_cache.GuardRenderTargets(true);
- View depth_surface = texture_cache.GetDepthBufferSurface(true);
+ View depth_surface = texture_cache.GetDepthBufferSurface();
const auto& regs = gpu.regs;
UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -354,7 +354,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
FramebufferCacheKey key;
const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
for (std::size_t index = 0; index < colors_count; ++index) {
- View color_surface{texture_cache.GetColorBufferSurface(index, true)};
+ View color_surface{texture_cache.GetColorBufferSurface(index)};
if (!color_surface) {
continue;
}
@@ -386,11 +386,14 @@ void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using
texture_cache.GuardRenderTargets(true);
View color_surface;
if (using_color_fb) {
- color_surface = texture_cache.GetColorBufferSurface(regs.clear_buffers.RT, false);
+ const std::size_t index = regs.clear_buffers.RT;
+ color_surface = texture_cache.GetColorBufferSurface(index);
+ texture_cache.MarkColorBufferInUse(index);
}
View depth_surface;
if (using_depth_fb || using_stencil_fb) {
- depth_surface = texture_cache.GetDepthBufferSurface(false);
+ depth_surface = texture_cache.GetDepthBufferSurface();
+ texture_cache.MarkDepthBufferInUse();
}
texture_cache.GuardRenderTargets(false);
@@ -493,6 +496,7 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
SyncPrimitiveRestart();
SyncScissorTest();
SyncPointState();
+ SyncLineState();
SyncPolygonOffset();
SyncAlphaTest();
SyncFramebufferSRGB();
@@ -653,9 +657,9 @@ void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
void RasterizerOpenGL::FlushAll() {}
-void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- if (!addr || !size) {
+ if (addr == 0 || size == 0) {
return;
}
texture_cache.FlushRegion(addr, size);
@@ -663,9 +667,9 @@ void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
query_cache.FlushRegion(addr, size);
}
-void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- if (!addr || !size) {
+ if (addr == 0 || size == 0) {
return;
}
texture_cache.InvalidateRegion(addr, size);
@@ -674,7 +678,7 @@ void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
query_cache.InvalidateRegion(addr, size);
}
-void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
if (Settings::values.use_accurate_gpu_emulation) {
FlushRegion(addr, size);
}
@@ -713,8 +717,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- const auto surface{
- texture_cache.TryFindFramebufferSurface(system.Memory().GetPointer(framebuffer_addr))};
+ const auto surface{texture_cache.TryFindFramebufferSurface(framebuffer_addr)};
if (!surface) {
return {};
}
@@ -1309,6 +1312,19 @@ void RasterizerOpenGL::SyncPointState() {
glDisable(GL_PROGRAM_POINT_SIZE);
}
+void RasterizerOpenGL::SyncLineState() {
+ auto& gpu = system.GPU().Maxwell3D();
+ auto& flags = gpu.dirty.flags;
+ if (!flags[Dirty::LineWidth]) {
+ return;
+ }
+ flags[Dirty::LineWidth] = false;
+
+ const auto& regs = gpu.regs;
+ oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable);
+ glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased);
+}
+
void RasterizerOpenGL::SyncPolygonOffset() {
auto& gpu = system.GPU().Maxwell3D();
auto& flags = gpu.dirty.flags;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 2d3be2437..435da4425 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -65,9 +65,9 @@ public:
void ResetCounter(VideoCore::QueryType type) override;
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
- void FlushRegion(CacheAddr addr, u64 size) override;
- void InvalidateRegion(CacheAddr addr, u64 size) override;
- void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -171,6 +171,9 @@ private:
/// Syncs the point state to match the guest state
void SyncPointState();
+ /// Syncs the line state to match the guest state
+ void SyncLineState();
+
/// Syncs the rasterizer enable state to match the guest state
void SyncRasterizeEnable();
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 046ee55a5..12c6dcfde 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -34,6 +34,8 @@
namespace OpenGL {
using Tegra::Engines::ShaderType;
+using VideoCommon::Shader::CompileDepth;
+using VideoCommon::Shader::CompilerSettings;
using VideoCommon::Shader::ProgramCode;
using VideoCommon::Shader::Registry;
using VideoCommon::Shader::ShaderIR;
@@ -43,7 +45,7 @@ namespace {
constexpr u32 STAGE_MAIN_OFFSET = 10;
constexpr u32 KERNEL_MAIN_OFFSET = 0;
-constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
+constexpr CompilerSettings COMPILER_SETTINGS{CompileDepth::FullDecompile};
/// Gets the address for the specified shader stage program
GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
@@ -214,11 +216,11 @@ std::unordered_set<GLenum> GetSupportedFormats() {
} // Anonymous namespace
-CachedShader::CachedShader(const u8* host_ptr, VAddr cpu_addr, std::size_t size_in_bytes,
+CachedShader::CachedShader(VAddr cpu_addr, std::size_t size_in_bytes,
std::shared_ptr<VideoCommon::Shader::Registry> registry,
ShaderEntries entries, std::shared_ptr<OGLProgram> program)
- : RasterizerCacheObject{host_ptr}, registry{std::move(registry)}, entries{std::move(entries)},
- cpu_addr{cpu_addr}, size_in_bytes{size_in_bytes}, program{std::move(program)} {}
+ : RasterizerCacheObject{cpu_addr}, registry{std::move(registry)}, entries{std::move(entries)},
+ size_in_bytes{size_in_bytes}, program{std::move(program)} {}
CachedShader::~CachedShader() = default;
@@ -254,9 +256,8 @@ Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
entry.bindless_samplers = registry->GetBindlessSamplers();
params.disk_cache.SaveEntry(std::move(entry));
- return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
- size_in_bytes, std::move(registry),
- MakeEntries(ir), std::move(program)));
+ return std::shared_ptr<CachedShader>(new CachedShader(
+ params.cpu_addr, size_in_bytes, std::move(registry), MakeEntries(ir), std::move(program)));
}
Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) {
@@ -279,17 +280,16 @@ Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, Prog
entry.bindless_samplers = registry->GetBindlessSamplers();
params.disk_cache.SaveEntry(std::move(entry));
- return std::shared_ptr<CachedShader>(new CachedShader(params.host_ptr, params.cpu_addr,
- size_in_bytes, std::move(registry),
- MakeEntries(ir), std::move(program)));
+ return std::shared_ptr<CachedShader>(new CachedShader(
+ params.cpu_addr, size_in_bytes, std::move(registry), MakeEntries(ir), std::move(program)));
}
Shader CachedShader::CreateFromCache(const ShaderParameters& params,
const PrecompiledShader& precompiled_shader,
std::size_t size_in_bytes) {
- return std::shared_ptr<CachedShader>(new CachedShader(
- params.host_ptr, params.cpu_addr, size_in_bytes, precompiled_shader.registry,
- precompiled_shader.entries, precompiled_shader.program));
+ return std::shared_ptr<CachedShader>(
+ new CachedShader(params.cpu_addr, size_in_bytes, precompiled_shader.registry,
+ precompiled_shader.entries, precompiled_shader.program));
}
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
@@ -449,12 +449,14 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const GPUVAddr address{GetShaderAddress(system, program)};
// Look up shader in the cache based on address
- const auto host_ptr{memory_manager.GetPointer(address)};
- Shader shader{TryGet(host_ptr)};
+ const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
+ Shader shader{cpu_addr ? TryGet(*cpu_addr) : nullptr};
if (shader) {
return last_shaders[static_cast<std::size_t>(program)] = shader;
}
+ const auto host_ptr{memory_manager.GetPointer(address)};
+
// No shader found - create a new one
ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)};
ProgramCode code_b;
@@ -465,9 +467,9 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const auto unique_identifier = GetUniqueIdentifier(
GetShaderType(program), program == Maxwell::ShaderProgram::VertexA, code, code_b);
- const auto cpu_addr{*memory_manager.GpuToCpuAddress(address)};
- const ShaderParameters params{system, disk_cache, device,
- cpu_addr, host_ptr, unique_identifier};
+
+ const ShaderParameters params{system, disk_cache, device,
+ *cpu_addr, host_ptr, unique_identifier};
const auto found = runtime_cache.find(unique_identifier);
if (found == runtime_cache.end()) {
@@ -484,18 +486,20 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
auto& memory_manager{system.GPU().MemoryManager()};
- const auto host_ptr{memory_manager.GetPointer(code_addr)};
- auto kernel = TryGet(host_ptr);
+ const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};
+
+ auto kernel = cpu_addr ? TryGet(*cpu_addr) : nullptr;
if (kernel) {
return kernel;
}
+ const auto host_ptr{memory_manager.GetPointer(code_addr)};
// No kernel found, create a new one
auto code{GetShaderCode(memory_manager, code_addr, host_ptr)};
const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
- const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)};
- const ShaderParameters params{system, disk_cache, device,
- cpu_addr, host_ptr, unique_identifier};
+
+ const ShaderParameters params{system, disk_cache, device,
+ *cpu_addr, host_ptr, unique_identifier};
const auto found = runtime_cache.find(unique_identifier);
if (found == runtime_cache.end()) {
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 4935019fc..c836df5bd 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -65,11 +65,6 @@ public:
/// Gets the GL program handle for the shader
GLuint GetHandle() const;
- /// Returns the guest CPU address of the shader
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
/// Returns the size in bytes of the shader
std::size_t GetSizeInBytes() const override {
return size_in_bytes;
@@ -90,13 +85,12 @@ public:
std::size_t size_in_bytes);
private:
- explicit CachedShader(const u8* host_ptr, VAddr cpu_addr, std::size_t size_in_bytes,
+ explicit CachedShader(VAddr cpu_addr, std::size_t size_in_bytes,
std::shared_ptr<VideoCommon::Shader::Registry> registry,
ShaderEntries entries, std::shared_ptr<OGLProgram> program);
std::shared_ptr<VideoCommon::Shader::Registry> registry;
ShaderEntries entries;
- VAddr cpu_addr = 0;
std::size_t size_in_bytes = 0;
std::shared_ptr<OGLProgram> program;
};
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index a25280a47..b1804e9ea 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -31,11 +31,11 @@ namespace {
using Tegra::Engines::ShaderType;
using Tegra::Shader::Attribute;
-using Tegra::Shader::AttributeUse;
using Tegra::Shader::Header;
using Tegra::Shader::IpaInterpMode;
using Tegra::Shader::IpaMode;
using Tegra::Shader::IpaSampleMode;
+using Tegra::Shader::PixelImap;
using Tegra::Shader::Register;
using VideoCommon::Shader::BuildTransformFeedback;
using VideoCommon::Shader::Registry;
@@ -702,20 +702,19 @@ private:
code.AddNewLine();
}
- std::string GetInputFlags(AttributeUse attribute) {
+ const char* GetInputFlags(PixelImap attribute) {
switch (attribute) {
- case AttributeUse::Perspective:
- // Default, Smooth
- return {};
- case AttributeUse::Constant:
- return "flat ";
- case AttributeUse::ScreenLinear:
- return "noperspective ";
- default:
- case AttributeUse::Unused:
- UNIMPLEMENTED_MSG("Unknown attribute usage index={}", static_cast<u32>(attribute));
- return {};
+ case PixelImap::Perspective:
+ return "smooth";
+ case PixelImap::Constant:
+ return "flat";
+ case PixelImap::ScreenLinear:
+ return "noperspective";
+ case PixelImap::Unused:
+ break;
}
+ UNIMPLEMENTED_MSG("Unknown attribute usage index={}", static_cast<int>(attribute));
+ return ""; // not {}: callers assign the result to std::string, so never return a null pointer
}
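The returned strings map one-to-one onto GLSL interpolation qualifiers on the fragment inputs. For a concrete picture, a declaration emitted for a flat attribute would look like this (location and variable name are illustrative):

// Hypothetical emission, in the style of DeclareInputAttributes:
code.AddLine("layout (location = {}) {} in vec4 {};", 0,
             GetInputFlags(PixelImap::Constant), "in_attr0");
// -> layout (location = 0) flat in vec4 in_attr0;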
void DeclareInputAttributes() {
@@ -749,8 +748,8 @@ private:
std::string suffix;
if (stage == ShaderType::Fragment) {
- const auto input_mode{header.ps.GetAttributeUse(location)};
- if (skip_unused && input_mode == AttributeUse::Unused) {
+ const auto input_mode{header.ps.GetPixelImap(location)};
+ if (input_mode == PixelImap::Unused) {
return;
}
suffix = GetInputFlags(input_mode);
@@ -927,7 +926,7 @@ private:
const u32 address{generic_base + index * generic_stride + element * element_stride};
const bool declared = stage != ShaderType::Fragment ||
- header.ps.GetAttributeUse(index) != AttributeUse::Unused;
+ header.ps.GetPixelImap(index) != PixelImap::Unused;
const std::string value =
declared ? ReadAttribute(attribute, element).AsFloat() : "0.0f";
code.AddLine("case 0x{:X}U: return {};", address, value);
@@ -1142,8 +1141,7 @@ private:
GetSwizzle(element)),
Type::Float};
case ShaderType::Fragment:
- return {element == 3 ? "1.0f" : ("gl_FragCoord"s + GetSwizzle(element)),
- Type::Float};
+ return {"gl_FragCoord"s + GetSwizzle(element), Type::Float};
default:
UNREACHABLE();
}
@@ -1821,15 +1819,17 @@ private:
}
Expression HMergeH0(Operation operation) {
- std::string dest = VisitOperand(operation, 0).AsUint();
- std::string src = VisitOperand(operation, 1).AsUint();
- return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", src, dest), Type::Uint};
+ const std::string dest = VisitOperand(operation, 0).AsUint();
+ const std::string src = VisitOperand(operation, 1).AsUint();
+ return {fmt::format("vec2(unpackHalf2x16({}).x, unpackHalf2x16({}).y)", src, dest),
+ Type::HalfFloat};
}
Expression HMergeH1(Operation operation) {
- std::string dest = VisitOperand(operation, 0).AsUint();
- std::string src = VisitOperand(operation, 1).AsUint();
- return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", dest, src), Type::Uint};
+ const std::string dest = VisitOperand(operation, 0).AsUint();
+ const std::string src = VisitOperand(operation, 1).AsUint();
+ return {fmt::format("vec2(unpackHalf2x16({}).x, unpackHalf2x16({}).y)", dest, src),
+ Type::HalfFloat};
}
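Although the result type changes from Uint to HalfFloat, the half selection is the same as the old bit-merge. Spelled out with GLSL's packing builtins (illustrative values):

// Let dest = packHalf2x16(vec2(a0, a1)) and src = packHalf2x16(vec2(b0, b1)).
// Old HMergeH0: (src & 0x0000FFFFU) | (dest & 0xFFFF0000U)          -> halves (b0, a1)
// New HMergeH0: vec2(unpackHalf2x16(src).x, unpackHalf2x16(dest).y) -> (b0, a1)
// HMergeH1 swaps the roles, taking the low half from dest and the high half from src;
// only the expression type changes, not which halves are selected.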
Expression HPack2(Operation operation) {
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index 255ac3147..d24fad3de 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -185,6 +185,12 @@ void SetupDirtyPointSize(Tables& tables) {
tables[0][OFF(point_sprite_enable)] = PointSize;
}
+void SetupDirtyLineWidth(Tables& tables) {
+ tables[0][OFF(line_width_smooth)] = LineWidth;
+ tables[0][OFF(line_width_aliased)] = LineWidth;
+ tables[0][OFF(line_smooth_enable)] = LineWidth;
+}
+
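tables[0] is indexed by Maxwell register offset, so writing any of the three line registers raises the LineWidth flag that SyncLineState (added above in gl_rasterizer.cpp) later consumes and clears. A minimal sketch of that write path, with simplified names (Flags and Tables stand in for the tracker's real types):

// Hedged sketch of the dirty-tracking write path:
void OnMethodWrite(std::size_t offset, Flags& flags, const Tables& tables) {
    if (const u8 dirty_flag = tables[0][offset]; dirty_flag != 0) {
        flags[dirty_flag] = true; // e.g. line_width_smooth -> Dirty::LineWidth
    }
}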
void SetupDirtyClipControl(Tables& tables) {
auto& table = tables[0];
table[OFF(screen_y_control)] = ClipControl;
@@ -233,6 +239,7 @@ void StateTracker::Initialize() {
SetupDirtyLogicOp(tables);
SetupDirtyFragmentClampColor(tables);
SetupDirtyPointSize(tables);
+ SetupDirtyLineWidth(tables);
SetupDirtyClipControl(tables);
SetupDirtyDepthClampEnabled(tables);
SetupDirtyMisc(tables);
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h
index b882d75c3..0f823288e 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.h
+++ b/src/video_core/renderer_opengl/gl_state_tracker.h
@@ -78,6 +78,7 @@ enum : u8 {
LogicOp,
FragmentClampColor,
PointSize,
+ LineWidth,
ClipControl,
DepthClampEnabled,
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index f424e3000..2729d1265 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -24,7 +24,6 @@ using Tegra::Texture::SwizzleSource;
using VideoCore::MortonSwizzleMode;
using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
using VideoCore::Surface::SurfaceTarget;
using VideoCore::Surface::SurfaceType;
@@ -37,102 +36,100 @@ namespace {
struct FormatTuple {
GLint internal_format;
- GLenum format;
- GLenum type;
- bool compressed;
+ GLenum format = GL_NONE;
+ GLenum type = GL_NONE;
};
constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8U
- {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, false}, // ABGR8S
- {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, false}, // ABGR8UI
- {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5U
- {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10U
- {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, false}, // A1B5G5R5U
- {GL_R8, GL_RED, GL_UNSIGNED_BYTE, false}, // R8U
- {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, false}, // R8UI
- {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBA16F
- {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, false}, // RGBA16U
- {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT, false}, // RGBA16S
- {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, false}, // RGBA16UI
- {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, false}, // R11FG11FB10F
- {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, false}, // RGBA32UI
- {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1
- {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23
- {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45
- {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN1
- {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN2UNORM
- {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, true}, // DXN2SNORM
- {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U
- {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_UF16
- {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_SF16
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4
- {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
- {GL_RGBA32F, GL_RGBA, GL_FLOAT, false}, // RGBA32F
- {GL_RG32F, GL_RG, GL_FLOAT, false}, // RG32F
- {GL_R32F, GL_RED, GL_FLOAT, false}, // R32F
- {GL_R16F, GL_RED, GL_HALF_FLOAT, false}, // R16F
- {GL_R16, GL_RED, GL_UNSIGNED_SHORT, false}, // R16U
- {GL_R16_SNORM, GL_RED, GL_SHORT, false}, // R16S
- {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, false}, // R16UI
- {GL_R16I, GL_RED_INTEGER, GL_SHORT, false}, // R16I
- {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, false}, // RG16
- {GL_RG16F, GL_RG, GL_HALF_FLOAT, false}, // RG16F
- {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, false}, // RG16UI
- {GL_RG16I, GL_RG_INTEGER, GL_SHORT, false}, // RG16I
- {GL_RG16_SNORM, GL_RG, GL_SHORT, false}, // RG16S
- {GL_RGB32F, GL_RGB, GL_FLOAT, false}, // RGB32F
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // RGBA8_SRGB
- {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, false}, // RG8U
- {GL_RG8_SNORM, GL_RG, GL_BYTE, false}, // RG8S
- {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, false}, // RG32UI
- {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBX16F
- {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, false}, // R32UI
- {GL_R32I, GL_RED_INTEGER, GL_INT, false}, // R32I
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4
- {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // ABGR8U
+ {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE}, // ABGR8S
+ {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE}, // ABGR8UI
+ {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV}, // B5G6R5U
+ {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10U
+ {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1B5G5R5U
+ {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8U
+ {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE}, // R8UI
+ {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT}, // RGBA16F
+ {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT}, // RGBA16U
+ {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT}, // RGBA16S
+ {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT}, // RGBA16UI
+ {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV}, // R11FG11FB10F
+ {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT}, // RGBA32UI
+ {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT}, // DXT1
+ {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT}, // DXT23
+ {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT}, // DXT45
+ {GL_COMPRESSED_RED_RGTC1}, // DXN1
+ {GL_COMPRESSED_RG_RGTC2}, // DXN2UNORM
+ {GL_COMPRESSED_SIGNED_RG_RGTC2}, // DXN2SNORM
+ {GL_COMPRESSED_RGBA_BPTC_UNORM}, // BC7U
+ {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT}, // BC6H_UF16
+ {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT}, // BC6H_SF16
+ {GL_COMPRESSED_RGBA_ASTC_4x4_KHR}, // ASTC_2D_4X4
+ {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
+ {GL_RGBA32F, GL_RGBA, GL_FLOAT}, // RGBA32F
+ {GL_RG32F, GL_RG, GL_FLOAT}, // RG32F
+ {GL_R32F, GL_RED, GL_FLOAT}, // R32F
+ {GL_R16F, GL_RED, GL_HALF_FLOAT}, // R16F
+ {GL_R16, GL_RED, GL_UNSIGNED_SHORT}, // R16U
+ {GL_R16_SNORM, GL_RED, GL_SHORT}, // R16S
+ {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT}, // R16UI
+ {GL_R16I, GL_RED_INTEGER, GL_SHORT}, // R16I
+ {GL_RG16, GL_RG, GL_UNSIGNED_SHORT}, // RG16
+ {GL_RG16F, GL_RG, GL_HALF_FLOAT}, // RG16F
+ {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT}, // RG16UI
+ {GL_RG16I, GL_RG_INTEGER, GL_SHORT}, // RG16I
+ {GL_RG16_SNORM, GL_RG, GL_SHORT}, // RG16S
+ {GL_RGB32F, GL_RGB, GL_FLOAT}, // RGB32F
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // RGBA8_SRGB
+ {GL_RG8, GL_RG, GL_UNSIGNED_BYTE}, // RG8U
+ {GL_RG8_SNORM, GL_RG, GL_BYTE}, // RG8S
+ {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT}, // RG32UI
+ {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT}, // RGBX16F
+ {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT}, // R32UI
+ {GL_R32I, GL_RED_INTEGER, GL_INT}, // R32I
+ {GL_COMPRESSED_RGBA_ASTC_8x8_KHR}, // ASTC_2D_8X8
+ {GL_COMPRESSED_RGBA_ASTC_8x5_KHR}, // ASTC_2D_8X5
+ {GL_COMPRESSED_RGBA_ASTC_5x4_KHR}, // ASTC_2D_5X4
+ {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
// Compressed sRGB formats
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U_SRGB
- {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, false}, // R4G4B4A4U
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5_SRGB
- {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, false}, // E5B9G9R9F
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT}, // DXT1_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT}, // DXT23_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // DXT45_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7U_SRGB
+ {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // R4G4B4A4U
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR}, // ASTC_2D_5X4_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_5x5_KHR}, // ASTC_2D_5X5
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR}, // ASTC_2D_5X5_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_10x8_KHR}, // ASTC_2D_10X8
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR}, // ASTC_2D_10X8_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR}, // ASTC_2D_12X12_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_8x6_KHR}, // ASTC_2D_8X6
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR}, // ASTC_2D_8X6_SRGB
+ {GL_COMPRESSED_RGBA_ASTC_6x5_KHR}, // ASTC_2D_6X5
+ {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR}, // ASTC_2D_6X5_SRGB
+ {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9F
// Depth formats
- {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, false}, // Z32F
- {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, false}, // Z16
+ {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // Z32F
+ {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // Z16
// DepthStencil formats
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // Z24S8
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // S8Z24
- {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, false}, // Z32FS8
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // Z24S8
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8Z24
+ {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV}, // Z32FS8
}};
const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
- const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]};
- return format;
+ return tex_format_tuples[static_cast<std::size_t>(pixel_format)];
}
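Leaving format/type as GL_NONE for compressed entries is harmless because compressed data never goes through the format/type upload path; glCompressedTextureSubImage* takes the compressed internal format directly. A condensed sketch of the branch the upload code takes (2D case, simplified):

if (is_compressed) {
    glCompressedTextureSubImage2D(texture.handle, level, 0, 0, width, height,
                                  static_cast<GLenum>(internal_format), image_size, buffer);
} else {
    glTextureSubImage2D(texture.handle, level, 0, 0, width, height, format, type, buffer);
}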
GLenum GetTextureTarget(const SurfaceTarget& target) {
@@ -242,13 +239,20 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte
} // Anonymous namespace
-CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params)
- : VideoCommon::SurfaceBase<View>(gpu_addr, params) {
- const auto& tuple{GetFormatTuple(params.pixel_format)};
- internal_format = tuple.internal_format;
- format = tuple.format;
- type = tuple.type;
- is_compressed = tuple.compressed;
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool is_astc_supported)
+ : VideoCommon::SurfaceBase<View>(gpu_addr, params, is_astc_supported) {
+ if (is_converted) {
+ internal_format = params.srgb_conversion ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+ format = GL_RGBA;
+ type = GL_UNSIGNED_BYTE;
+ } else {
+ const auto& tuple{GetFormatTuple(params.pixel_format)};
+ internal_format = tuple.internal_format;
+ format = tuple.format;
+ type = tuple.type;
+ is_compressed = params.IsCompressed();
+ }
target = GetTextureTarget(params.target);
texture = CreateTexture(params, target, internal_format, texture_buffer);
DecorateSurfaceName();
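When the device lacks native ASTC (see IsASTCSupported in gl_device.cpp above), is_converted selects the fallback: the texture cache decodes ASTC on the CPU and uploads plain color data instead. The decision reduces to:

// Illustrative summary of the constructor above:
// device has ASTC   -> keep GL_COMPRESSED_*_ASTC_*_KHR and upload blocks directly
// device lacks ASTC -> is_converted: CPU-decoded pixels uploaded as GL_RGBA8,
//                      or GL_SRGB8_ALPHA8 when params.srgb_conversion is set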
@@ -264,7 +268,7 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
if (params.IsBuffer()) {
glGetNamedBufferSubData(texture_buffer.handle, 0,
- static_cast<GLsizeiptr>(params.GetHostSizeInBytes()),
+ static_cast<GLsizeiptr>(params.GetHostSizeInBytes(false)),
staging_buffer.data());
return;
}
@@ -272,9 +276,10 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
for (u32 level = 0; level < params.emulated_levels; ++level) {
- glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
- const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level);
+ const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
+
u8* const mip_data = staging_buffer.data() + mip_offset;
const GLsizei size = static_cast<GLsizei>(params.GetHostMipmapSize(level));
if (is_compressed) {
@@ -294,14 +299,10 @@ void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
}
void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
- glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
- auto compression_type = params.GetCompressionType();
-
- const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
- ? params.GetConvertedMipmapOffset(level)
- : params.GetHostMipmapLevelOffset(level);
+ const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
const u8* buffer{staging_buffer.data() + mip_offset};
if (is_compressed) {
const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
@@ -410,14 +411,13 @@ CachedSurfaceView::~CachedSurfaceView() = default;
void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
ASSERT(params.num_levels == 1);
- const GLuint texture = surface.GetTexture();
if (params.num_layers > 1) {
// Layered framebuffer attachments
UNIMPLEMENTED_IF(params.base_layer != 0);
switch (params.target) {
case SurfaceTarget::Texture2DArray:
- glFramebufferTexture(target, attachment, texture, params.base_level);
+ glFramebufferTexture(target, attachment, GetTexture(), 0);
break;
default:
UNIMPLEMENTED();
@@ -426,6 +426,7 @@ void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
}
const GLenum view_target = surface.GetTarget();
+ const GLuint texture = surface.GetTexture();
switch (surface.GetSurfaceParams().target) {
case SurfaceTarget::Texture1D:
glFramebufferTexture1D(target, attachment, view_target, texture, params.base_level);
@@ -482,7 +483,7 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const {
TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
VideoCore::RasterizerInterface& rasterizer,
const Device& device, StateTracker& state_tracker)
- : TextureCacheBase{system, rasterizer}, state_tracker{state_tracker} {
+ : TextureCacheBase{system, rasterizer, device.HasASTC()}, state_tracker{state_tracker} {
src_framebuffer.Create();
dst_framebuffer.Create();
}
@@ -490,7 +491,7 @@ TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
TextureCacheOpenGL::~TextureCacheOpenGL() = default;
Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
- return std::make_shared<CachedSurface>(gpu_addr, params);
+ return std::make_shared<CachedSurface>(gpu_addr, params, is_astc_supported);
}
void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
@@ -596,7 +597,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
- if (source_format.compressed) {
+ if (src_surface->IsCompressed()) {
glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
nullptr);
} else {
@@ -610,7 +611,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
const GLsizei width = static_cast<GLsizei>(dst_params.width);
const GLsizei height = static_cast<GLsizei>(dst_params.height);
const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
- if (dest_format.compressed) {
+ if (dst_surface->IsCompressed()) {
LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
UNREACHABLE();
} else {
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 6658c6ffd..02d9981a1 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -37,7 +37,7 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
friend CachedSurfaceView;
public:
- explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
+ explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params, bool is_astc_supported);
~CachedSurface();
void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -51,6 +51,10 @@ public:
return texture.handle;
}
+ bool IsCompressed() const {
+ return is_compressed;
+ }
+
protected:
void DecorateSurfaceName() override;
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index f1a28cc21..b2a179746 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -315,8 +315,8 @@ public:
RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system,
Core::Frontend::GraphicsContext& context)
- : VideoCore::RendererBase{emu_window}, emu_window{emu_window}, system{system},
- frame_mailbox{}, context{context}, has_debug_tool{HasDebugTool()} {}
+ : RendererBase{emu_window}, emu_window{emu_window}, system{system}, context{context},
+ has_debug_tool{HasDebugTool()} {}
RendererOpenGL::~RendererOpenGL() = default;
diff --git a/src/video_core/renderer_vulkan/declarations.h b/src/video_core/renderer_vulkan/declarations.h
deleted file mode 100644
index 323bf6b39..000000000
--- a/src/video_core/renderer_vulkan/declarations.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-namespace vk {
-class DispatchLoaderDynamic;
-}
-
-namespace Vulkan {
-constexpr vk::DispatchLoaderDynamic* dont_use_me_dld = nullptr;
-}
-
-#define VULKAN_HPP_DEFAULT_DISPATCHER (*::Vulkan::dont_use_me_dld)
-#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0
-#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
-#include <vulkan/vulkan.hpp>
-
-namespace Vulkan {
-
-// vulkan.hpp unique handlers use DispatchLoaderStatic
-template <typename T>
-using UniqueHandle = vk::UniqueHandle<T, vk::DispatchLoaderDynamic>;
-
-using UniqueAccelerationStructureNV = UniqueHandle<vk::AccelerationStructureNV>;
-using UniqueBuffer = UniqueHandle<vk::Buffer>;
-using UniqueBufferView = UniqueHandle<vk::BufferView>;
-using UniqueCommandBuffer = UniqueHandle<vk::CommandBuffer>;
-using UniqueCommandPool = UniqueHandle<vk::CommandPool>;
-using UniqueDescriptorPool = UniqueHandle<vk::DescriptorPool>;
-using UniqueDescriptorSet = UniqueHandle<vk::DescriptorSet>;
-using UniqueDescriptorSetLayout = UniqueHandle<vk::DescriptorSetLayout>;
-using UniqueDescriptorUpdateTemplate = UniqueHandle<vk::DescriptorUpdateTemplate>;
-using UniqueDevice = UniqueHandle<vk::Device>;
-using UniqueDeviceMemory = UniqueHandle<vk::DeviceMemory>;
-using UniqueEvent = UniqueHandle<vk::Event>;
-using UniqueFence = UniqueHandle<vk::Fence>;
-using UniqueFramebuffer = UniqueHandle<vk::Framebuffer>;
-using UniqueImage = UniqueHandle<vk::Image>;
-using UniqueImageView = UniqueHandle<vk::ImageView>;
-using UniqueIndirectCommandsLayoutNVX = UniqueHandle<vk::IndirectCommandsLayoutNVX>;
-using UniqueObjectTableNVX = UniqueHandle<vk::ObjectTableNVX>;
-using UniquePipeline = UniqueHandle<vk::Pipeline>;
-using UniquePipelineCache = UniqueHandle<vk::PipelineCache>;
-using UniquePipelineLayout = UniqueHandle<vk::PipelineLayout>;
-using UniqueQueryPool = UniqueHandle<vk::QueryPool>;
-using UniqueRenderPass = UniqueHandle<vk::RenderPass>;
-using UniqueSampler = UniqueHandle<vk::Sampler>;
-using UniqueSamplerYcbcrConversion = UniqueHandle<vk::SamplerYcbcrConversion>;
-using UniqueSemaphore = UniqueHandle<vk::Semaphore>;
-using UniqueShaderModule = UniqueHandle<vk::ShaderModule>;
-using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>;
-using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>;
-using UniqueDebugReportCallbackEXT = UniqueHandle<vk::DebugReportCallbackEXT>;
-using UniqueDebugUtilsMessengerEXT = UniqueHandle<vk::DebugUtilsMessengerEXT>;
-
-} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 7480cb7c3..8681b821f 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -2,13 +2,15 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <iterator>
+
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
namespace Vulkan::MaxwellToVK {
@@ -17,88 +19,89 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
namespace Sampler {
-vk::Filter Filter(Tegra::Texture::TextureFilter filter) {
+VkFilter Filter(Tegra::Texture::TextureFilter filter) {
switch (filter) {
case Tegra::Texture::TextureFilter::Linear:
- return vk::Filter::eLinear;
+ return VK_FILTER_LINEAR;
case Tegra::Texture::TextureFilter::Nearest:
- return vk::Filter::eNearest;
+ return VK_FILTER_NEAREST;
}
UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter));
return {};
}
-vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
+VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
switch (mipmap_filter) {
case Tegra::Texture::TextureMipmapFilter::None:
// TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping
// (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing, find out if we have to
// use an image view with a single mipmap level to emulate this.
- return vk::SamplerMipmapMode::eLinear;
+ return VK_SAMPLER_MIPMAP_MODE_LINEAR;
case Tegra::Texture::TextureMipmapFilter::Linear:
- return vk::SamplerMipmapMode::eLinear;
+ return VK_SAMPLER_MIPMAP_MODE_LINEAR;
case Tegra::Texture::TextureMipmapFilter::Nearest:
- return vk::SamplerMipmapMode::eNearest;
+ return VK_SAMPLER_MIPMAP_MODE_NEAREST;
}
UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter));
return {};
}
-vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
- Tegra::Texture::TextureFilter filter) {
+VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+ Tegra::Texture::TextureFilter filter) {
switch (wrap_mode) {
case Tegra::Texture::WrapMode::Wrap:
- return vk::SamplerAddressMode::eRepeat;
+ return VK_SAMPLER_ADDRESS_MODE_REPEAT;
case Tegra::Texture::WrapMode::Mirror:
- return vk::SamplerAddressMode::eMirroredRepeat;
+ return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
case Tegra::Texture::WrapMode::ClampToEdge:
- return vk::SamplerAddressMode::eClampToEdge;
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case Tegra::Texture::WrapMode::Border:
- return vk::SamplerAddressMode::eClampToBorder;
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
case Tegra::Texture::WrapMode::Clamp:
- if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
+ if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
// Nvidia's Vulkan driver defaults to GL_CLAMP on invalid enumerations, so we can
// hack around it by sending an invalid enumeration.
- return static_cast<vk::SamplerAddressMode>(0xcafe);
+ return static_cast<VkSamplerAddressMode>(0xcafe);
}
// TODO(Rodrigo): Emulate GL_CLAMP properly on other vendors
switch (filter) {
case Tegra::Texture::TextureFilter::Nearest:
- return vk::SamplerAddressMode::eClampToEdge;
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case Tegra::Texture::TextureFilter::Linear:
- return vk::SamplerAddressMode::eClampToBorder;
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
}
UNREACHABLE();
- return vk::SamplerAddressMode::eClampToEdge;
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
- return vk::SamplerAddressMode::eMirrorClampToEdge;
+ return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
case Tegra::Texture::WrapMode::MirrorOnceBorder:
UNIMPLEMENTED();
- return vk::SamplerAddressMode::eMirrorClampToEdge;
+ return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
default:
UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
return {};
}
}
-vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
+VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
switch (depth_compare_func) {
case Tegra::Texture::DepthCompareFunc::Never:
- return vk::CompareOp::eNever;
+ return VK_COMPARE_OP_NEVER;
case Tegra::Texture::DepthCompareFunc::Less:
- return vk::CompareOp::eLess;
+ return VK_COMPARE_OP_LESS;
case Tegra::Texture::DepthCompareFunc::LessEqual:
- return vk::CompareOp::eLessOrEqual;
+ return VK_COMPARE_OP_LESS_OR_EQUAL;
case Tegra::Texture::DepthCompareFunc::Equal:
- return vk::CompareOp::eEqual;
+ return VK_COMPARE_OP_EQUAL;
case Tegra::Texture::DepthCompareFunc::NotEqual:
- return vk::CompareOp::eNotEqual;
+ return VK_COMPARE_OP_NOT_EQUAL;
case Tegra::Texture::DepthCompareFunc::Greater:
- return vk::CompareOp::eGreater;
+ return VK_COMPARE_OP_GREATER;
case Tegra::Texture::DepthCompareFunc::GreaterEqual:
- return vk::CompareOp::eGreaterOrEqual;
+ return VK_COMPARE_OP_GREATER_OR_EQUAL;
case Tegra::Texture::DepthCompareFunc::Always:
- return vk::CompareOp::eAlways;
+ return VK_COMPARE_OP_ALWAYS;
}
UNIMPLEMENTED_MSG("Unimplemented sampler depth compare function={}",
static_cast<u32>(depth_compare_func));
@@ -112,92 +115,92 @@ namespace {
enum : u32 { Attachable = 1, Storage = 2 };
struct FormatTuple {
- vk::Format format; ///< Vulkan format
- int usage; ///< Describes image format usage
+ VkFormat format; ///< Vulkan format
+ int usage = 0; ///< Describes image format usage
} constexpr tex_format_tuples[] = {
- {vk::Format::eA8B8G8R8UnormPack32, Attachable | Storage}, // ABGR8U
- {vk::Format::eA8B8G8R8SnormPack32, Attachable | Storage}, // ABGR8S
- {vk::Format::eA8B8G8R8UintPack32, Attachable | Storage}, // ABGR8UI
- {vk::Format::eB5G6R5UnormPack16, {}}, // B5G6R5U
- {vk::Format::eA2B10G10R10UnormPack32, Attachable | Storage}, // A2B10G10R10U
- {vk::Format::eA1R5G5B5UnormPack16, Attachable}, // A1B5G5R5U (flipped with swizzle)
- {vk::Format::eR8Unorm, Attachable | Storage}, // R8U
- {vk::Format::eR8Uint, Attachable | Storage}, // R8UI
- {vk::Format::eR16G16B16A16Sfloat, Attachable | Storage}, // RGBA16F
- {vk::Format::eR16G16B16A16Unorm, Attachable | Storage}, // RGBA16U
- {vk::Format::eR16G16B16A16Snorm, Attachable | Storage}, // RGBA16S
- {vk::Format::eR16G16B16A16Uint, Attachable | Storage}, // RGBA16UI
- {vk::Format::eB10G11R11UfloatPack32, Attachable | Storage}, // R11FG11FB10F
- {vk::Format::eR32G32B32A32Uint, Attachable | Storage}, // RGBA32UI
- {vk::Format::eBc1RgbaUnormBlock, {}}, // DXT1
- {vk::Format::eBc2UnormBlock, {}}, // DXT23
- {vk::Format::eBc3UnormBlock, {}}, // DXT45
- {vk::Format::eBc4UnormBlock, {}}, // DXN1
- {vk::Format::eBc5UnormBlock, {}}, // DXN2UNORM
- {vk::Format::eBc5SnormBlock, {}}, // DXN2SNORM
- {vk::Format::eBc7UnormBlock, {}}, // BC7U
- {vk::Format::eBc6HUfloatBlock, {}}, // BC6H_UF16
- {vk::Format::eBc6HSfloatBlock, {}}, // BC6H_SF16
- {vk::Format::eAstc4x4UnormBlock, {}}, // ASTC_2D_4X4
- {vk::Format::eB8G8R8A8Unorm, {}}, // BGRA8
- {vk::Format::eR32G32B32A32Sfloat, Attachable | Storage}, // RGBA32F
- {vk::Format::eR32G32Sfloat, Attachable | Storage}, // RG32F
- {vk::Format::eR32Sfloat, Attachable | Storage}, // R32F
- {vk::Format::eR16Sfloat, Attachable | Storage}, // R16F
- {vk::Format::eR16Unorm, Attachable | Storage}, // R16U
- {vk::Format::eUndefined, {}}, // R16S
- {vk::Format::eUndefined, {}}, // R16UI
- {vk::Format::eUndefined, {}}, // R16I
- {vk::Format::eR16G16Unorm, Attachable | Storage}, // RG16
- {vk::Format::eR16G16Sfloat, Attachable | Storage}, // RG16F
- {vk::Format::eUndefined, {}}, // RG16UI
- {vk::Format::eUndefined, {}}, // RG16I
- {vk::Format::eR16G16Snorm, Attachable | Storage}, // RG16S
- {vk::Format::eUndefined, {}}, // RGB32F
- {vk::Format::eR8G8B8A8Srgb, Attachable}, // RGBA8_SRGB
- {vk::Format::eR8G8Unorm, Attachable | Storage}, // RG8U
- {vk::Format::eR8G8Snorm, Attachable | Storage}, // RG8S
- {vk::Format::eR32G32Uint, Attachable | Storage}, // RG32UI
- {vk::Format::eUndefined, {}}, // RGBX16F
- {vk::Format::eR32Uint, Attachable | Storage}, // R32UI
- {vk::Format::eR32Sint, Attachable | Storage}, // R32I
- {vk::Format::eAstc8x8UnormBlock, {}}, // ASTC_2D_8X8
- {vk::Format::eUndefined, {}}, // ASTC_2D_8X5
- {vk::Format::eUndefined, {}}, // ASTC_2D_5X4
- {vk::Format::eUndefined, {}}, // BGRA8_SRGB
- {vk::Format::eBc1RgbaSrgbBlock, {}}, // DXT1_SRGB
- {vk::Format::eBc2SrgbBlock, {}}, // DXT23_SRGB
- {vk::Format::eBc3SrgbBlock, {}}, // DXT45_SRGB
- {vk::Format::eBc7SrgbBlock, {}}, // BC7U_SRGB
- {vk::Format::eR4G4B4A4UnormPack16, Attachable}, // R4G4B4A4U
- {vk::Format::eAstc4x4SrgbBlock, {}}, // ASTC_2D_4X4_SRGB
- {vk::Format::eAstc8x8SrgbBlock, {}}, // ASTC_2D_8X8_SRGB
- {vk::Format::eAstc8x5SrgbBlock, {}}, // ASTC_2D_8X5_SRGB
- {vk::Format::eAstc5x4SrgbBlock, {}}, // ASTC_2D_5X4_SRGB
- {vk::Format::eAstc5x5UnormBlock, {}}, // ASTC_2D_5X5
- {vk::Format::eAstc5x5SrgbBlock, {}}, // ASTC_2D_5X5_SRGB
- {vk::Format::eAstc10x8UnormBlock, {}}, // ASTC_2D_10X8
- {vk::Format::eAstc10x8SrgbBlock, {}}, // ASTC_2D_10X8_SRGB
- {vk::Format::eAstc6x6UnormBlock, {}}, // ASTC_2D_6X6
- {vk::Format::eAstc6x6SrgbBlock, {}}, // ASTC_2D_6X6_SRGB
- {vk::Format::eAstc10x10UnormBlock, {}}, // ASTC_2D_10X10
- {vk::Format::eAstc10x10SrgbBlock, {}}, // ASTC_2D_10X10_SRGB
- {vk::Format::eAstc12x12UnormBlock, {}}, // ASTC_2D_12X12
- {vk::Format::eAstc12x12SrgbBlock, {}}, // ASTC_2D_12X12_SRGB
- {vk::Format::eAstc8x6UnormBlock, {}}, // ASTC_2D_8X6
- {vk::Format::eAstc8x6SrgbBlock, {}}, // ASTC_2D_8X6_SRGB
- {vk::Format::eAstc6x5UnormBlock, {}}, // ASTC_2D_6X5
- {vk::Format::eAstc6x5SrgbBlock, {}}, // ASTC_2D_6X5_SRGB
- {vk::Format::eE5B9G9R9UfloatPack32, {}}, // E5B9G9R9F
+ {VK_FORMAT_A8B8G8R8_UNORM_PACK32, Attachable | Storage}, // ABGR8U
+ {VK_FORMAT_A8B8G8R8_SNORM_PACK32, Attachable | Storage}, // ABGR8S
+ {VK_FORMAT_A8B8G8R8_UINT_PACK32, Attachable | Storage}, // ABGR8UI
+ {VK_FORMAT_B5G6R5_UNORM_PACK16}, // B5G6R5U
+ {VK_FORMAT_A2B10G10R10_UNORM_PACK32, Attachable | Storage}, // A2B10G10R10U
+ {VK_FORMAT_A1R5G5B5_UNORM_PACK16, Attachable}, // A1B5G5R5U (flipped with swizzle)
+ {VK_FORMAT_R8_UNORM, Attachable | Storage}, // R8U
+ {VK_FORMAT_R8_UINT, Attachable | Storage}, // R8UI
+ {VK_FORMAT_R16G16B16A16_SFLOAT, Attachable | Storage}, // RGBA16F
+ {VK_FORMAT_R16G16B16A16_UNORM, Attachable | Storage}, // RGBA16U
+ {VK_FORMAT_R16G16B16A16_SNORM, Attachable | Storage}, // RGBA16S
+ {VK_FORMAT_R16G16B16A16_UINT, Attachable | Storage}, // RGBA16UI
+ {VK_FORMAT_B10G11R11_UFLOAT_PACK32, Attachable | Storage}, // R11FG11FB10F
+ {VK_FORMAT_R32G32B32A32_UINT, Attachable | Storage}, // RGBA32UI
+ {VK_FORMAT_BC1_RGBA_UNORM_BLOCK}, // DXT1
+ {VK_FORMAT_BC2_UNORM_BLOCK}, // DXT23
+ {VK_FORMAT_BC3_UNORM_BLOCK}, // DXT45
+ {VK_FORMAT_BC4_UNORM_BLOCK}, // DXN1
+ {VK_FORMAT_BC5_UNORM_BLOCK}, // DXN2UNORM
+ {VK_FORMAT_BC5_SNORM_BLOCK}, // DXN2SNORM
+ {VK_FORMAT_BC7_UNORM_BLOCK}, // BC7U
+ {VK_FORMAT_BC6H_UFLOAT_BLOCK}, // BC6H_UF16
+ {VK_FORMAT_BC6H_SFLOAT_BLOCK}, // BC6H_SF16
+ {VK_FORMAT_ASTC_4x4_UNORM_BLOCK}, // ASTC_2D_4X4
+ {VK_FORMAT_B8G8R8A8_UNORM}, // BGRA8
+ {VK_FORMAT_R32G32B32A32_SFLOAT, Attachable | Storage}, // RGBA32F
+ {VK_FORMAT_R32G32_SFLOAT, Attachable | Storage}, // RG32F
+ {VK_FORMAT_R32_SFLOAT, Attachable | Storage}, // R32F
+ {VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16F
+ {VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16U
+ {VK_FORMAT_UNDEFINED}, // R16S
+ {VK_FORMAT_UNDEFINED}, // R16UI
+ {VK_FORMAT_UNDEFINED}, // R16I
+ {VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // RG16
+ {VK_FORMAT_R16G16_SFLOAT, Attachable | Storage}, // RG16F
+ {VK_FORMAT_UNDEFINED}, // RG16UI
+ {VK_FORMAT_UNDEFINED}, // RG16I
+ {VK_FORMAT_R16G16_SNORM, Attachable | Storage}, // RG16S
+ {VK_FORMAT_UNDEFINED}, // RGB32F
+ {VK_FORMAT_R8G8B8A8_SRGB, Attachable}, // RGBA8_SRGB
+ {VK_FORMAT_R8G8_UNORM, Attachable | Storage}, // RG8U
+ {VK_FORMAT_R8G8_SNORM, Attachable | Storage}, // RG8S
+ {VK_FORMAT_R32G32_UINT, Attachable | Storage}, // RG32UI
+ {VK_FORMAT_UNDEFINED}, // RGBX16F
+ {VK_FORMAT_R32_UINT, Attachable | Storage}, // R32UI
+ {VK_FORMAT_R32_SINT, Attachable | Storage}, // R32I
+ {VK_FORMAT_ASTC_8x8_UNORM_BLOCK}, // ASTC_2D_8X8
+ {VK_FORMAT_UNDEFINED}, // ASTC_2D_8X5
+ {VK_FORMAT_UNDEFINED}, // ASTC_2D_5X4
+ {VK_FORMAT_UNDEFINED}, // BGRA8_SRGB
+ {VK_FORMAT_BC1_RGBA_SRGB_BLOCK}, // DXT1_SRGB
+ {VK_FORMAT_BC2_SRGB_BLOCK}, // DXT23_SRGB
+ {VK_FORMAT_BC3_SRGB_BLOCK}, // DXT45_SRGB
+ {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7U_SRGB
+ {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // R4G4B4A4U
+ {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB
+ {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB
+ {VK_FORMAT_ASTC_8x5_SRGB_BLOCK}, // ASTC_2D_8X5_SRGB
+ {VK_FORMAT_ASTC_5x4_SRGB_BLOCK}, // ASTC_2D_5X4_SRGB
+ {VK_FORMAT_ASTC_5x5_UNORM_BLOCK}, // ASTC_2D_5X5
+ {VK_FORMAT_ASTC_5x5_SRGB_BLOCK}, // ASTC_2D_5X5_SRGB
+ {VK_FORMAT_ASTC_10x8_UNORM_BLOCK}, // ASTC_2D_10X8
+ {VK_FORMAT_ASTC_10x8_SRGB_BLOCK}, // ASTC_2D_10X8_SRGB
+ {VK_FORMAT_ASTC_6x6_UNORM_BLOCK}, // ASTC_2D_6X6
+ {VK_FORMAT_ASTC_6x6_SRGB_BLOCK}, // ASTC_2D_6X6_SRGB
+ {VK_FORMAT_ASTC_10x10_UNORM_BLOCK}, // ASTC_2D_10X10
+ {VK_FORMAT_ASTC_10x10_SRGB_BLOCK}, // ASTC_2D_10X10_SRGB
+ {VK_FORMAT_ASTC_12x12_UNORM_BLOCK}, // ASTC_2D_12X12
+ {VK_FORMAT_ASTC_12x12_SRGB_BLOCK}, // ASTC_2D_12X12_SRGB
+ {VK_FORMAT_ASTC_8x6_UNORM_BLOCK}, // ASTC_2D_8X6
+ {VK_FORMAT_ASTC_8x6_SRGB_BLOCK}, // ASTC_2D_8X6_SRGB
+ {VK_FORMAT_ASTC_6x5_UNORM_BLOCK}, // ASTC_2D_6X5
+ {VK_FORMAT_ASTC_6x5_SRGB_BLOCK}, // ASTC_2D_6X5_SRGB
+ {VK_FORMAT_E5B9G9R9_UFLOAT_PACK32}, // E5B9G9R9F
// Depth formats
- {vk::Format::eD32Sfloat, Attachable}, // Z32F
- {vk::Format::eD16Unorm, Attachable}, // Z16
+ {VK_FORMAT_D32_SFLOAT, Attachable}, // Z32F
+ {VK_FORMAT_D16_UNORM, Attachable}, // Z16
// DepthStencil formats
- {vk::Format::eD24UnormS8Uint, Attachable}, // Z24S8
- {vk::Format::eD24UnormS8Uint, Attachable}, // S8Z24 (emulated)
- {vk::Format::eD32SfloatS8Uint, Attachable}, // Z32FS8
+ {VK_FORMAT_D24_UNORM_S8_UINT, Attachable}, // Z24S8
+ {VK_FORMAT_D24_UNORM_S8_UINT, Attachable}, // S8Z24 (emulated)
+ {VK_FORMAT_D32_SFLOAT_S8_UINT, Attachable}, // Z32FS8
};
static_assert(std::size(tex_format_tuples) == VideoCore::Surface::MaxPixelFormat);
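The tuple table above is indexed directly by VideoCore::Surface::PixelFormat, so entry order must mirror the enum; the static_assert only guards the count. A minimal lookup sketch under that ordering assumption (GetTuple is a hypothetical helper, not part of this patch):

    FormatTuple GetTuple(VideoCore::Surface::PixelFormat pixel_format) {
        // Valid only while tex_format_tuples is ordered exactly like PixelFormat.
        const auto index = static_cast<std::size_t>(pixel_format);
        ASSERT(index < std::size(tex_format_tuples));
        return tex_format_tuples[index];
    }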
@@ -212,106 +215,106 @@ FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFo
ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples));
auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)];
- if (tuple.format == vk::Format::eUndefined) {
+ if (tuple.format == VK_FORMAT_UNDEFINED) {
UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}",
static_cast<u32>(pixel_format));
- return {vk::Format::eA8B8G8R8UnormPack32, true, true};
+ return {VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, true};
}
// Use ABGR8 on hardware that doesn't support ASTC natively
if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
tuple.format = VideoCore::Surface::IsPixelFormatSRGB(pixel_format)
- ? vk::Format::eA8B8G8R8SrgbPack32
- : vk::Format::eA8B8G8R8UnormPack32;
+ ? VK_FORMAT_A8B8G8R8_SRGB_PACK32
+ : VK_FORMAT_A8B8G8R8_UNORM_PACK32;
}
const bool attachable = tuple.usage & Attachable;
const bool storage = tuple.usage & Storage;
- vk::FormatFeatureFlags usage;
+ VkFormatFeatureFlags usage;
if (format_type == FormatType::Buffer) {
- usage = vk::FormatFeatureFlagBits::eStorageTexelBuffer |
- vk::FormatFeatureFlagBits::eUniformTexelBuffer;
+ usage =
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
} else {
- usage = vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eTransferDst |
- vk::FormatFeatureFlagBits::eTransferSrc;
+ usage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+ VK_FORMAT_FEATURE_TRANSFER_SRC_BIT;
if (attachable) {
- usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment
- : vk::FormatFeatureFlagBits::eColorAttachment;
+ usage |= IsZetaFormat(pixel_format) ? VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT
+ : VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
}
if (storage) {
- usage |= vk::FormatFeatureFlagBits::eStorageImage;
+ usage |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
}
}
return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
}
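Callers consume the returned FormatInfo roughly as below (a sketch; FormatType::Optimal is assumed to be an enumerator alongside Buffer, and the variable names are illustrative):

    // Resolve the host Vulkan format for a guest surface.
    const auto [format, attachable, storage] =
        MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
    // 'format' can differ from the table entry: ASTC falls back to ABGR8 on
    // devices without native support, and GetSupportedFormat may substitute
    // a compatible format for the requested usage flags.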
-vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
+VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
switch (stage) {
case Tegra::Engines::ShaderType::Vertex:
- return vk::ShaderStageFlagBits::eVertex;
+ return VK_SHADER_STAGE_VERTEX_BIT;
case Tegra::Engines::ShaderType::TesselationControl:
- return vk::ShaderStageFlagBits::eTessellationControl;
+ return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
case Tegra::Engines::ShaderType::TesselationEval:
- return vk::ShaderStageFlagBits::eTessellationEvaluation;
+ return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
case Tegra::Engines::ShaderType::Geometry:
- return vk::ShaderStageFlagBits::eGeometry;
+ return VK_SHADER_STAGE_GEOMETRY_BIT;
case Tegra::Engines::ShaderType::Fragment:
- return vk::ShaderStageFlagBits::eFragment;
+ return VK_SHADER_STAGE_FRAGMENT_BIT;
case Tegra::Engines::ShaderType::Compute:
- return vk::ShaderStageFlagBits::eCompute;
+ return VK_SHADER_STAGE_COMPUTE_BIT;
}
UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage));
return {};
}
-vk::PrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
- Maxwell::PrimitiveTopology topology) {
+VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
+ Maxwell::PrimitiveTopology topology) {
switch (topology) {
case Maxwell::PrimitiveTopology::Points:
- return vk::PrimitiveTopology::ePointList;
+ return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
case Maxwell::PrimitiveTopology::Lines:
- return vk::PrimitiveTopology::eLineList;
+ return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
case Maxwell::PrimitiveTopology::LineStrip:
- return vk::PrimitiveTopology::eLineStrip;
+ return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
case Maxwell::PrimitiveTopology::Triangles:
- return vk::PrimitiveTopology::eTriangleList;
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
case Maxwell::PrimitiveTopology::TriangleStrip:
- return vk::PrimitiveTopology::eTriangleStrip;
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
case Maxwell::PrimitiveTopology::TriangleFan:
- return vk::PrimitiveTopology::eTriangleFan;
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
case Maxwell::PrimitiveTopology::Quads:
// TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases
- return vk::PrimitiveTopology::eTriangleList;
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
case Maxwell::PrimitiveTopology::Patches:
- return vk::PrimitiveTopology::ePatchList;
+ return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
default:
UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
return {};
}
}
-vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
+VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
switch (type) {
case Maxwell::VertexAttribute::Type::SignedNorm:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Snorm;
+ return VK_FORMAT_R8_SNORM;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Snorm;
+ return VK_FORMAT_R8G8_SNORM;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Snorm;
+ return VK_FORMAT_R8G8B8_SNORM;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Snorm;
+ return VK_FORMAT_R8G8B8A8_SNORM;
case Maxwell::VertexAttribute::Size::Size_16:
- return vk::Format::eR16Snorm;
+ return VK_FORMAT_R16_SNORM;
case Maxwell::VertexAttribute::Size::Size_16_16:
- return vk::Format::eR16G16Snorm;
+ return VK_FORMAT_R16G16_SNORM;
case Maxwell::VertexAttribute::Size::Size_16_16_16:
- return vk::Format::eR16G16B16Snorm;
+ return VK_FORMAT_R16G16B16_SNORM;
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Snorm;
+ return VK_FORMAT_R16G16B16A16_SNORM;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
- return vk::Format::eA2B10G10R10SnormPack32;
+ return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
default:
break;
}
@@ -319,23 +322,23 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
case Maxwell::VertexAttribute::Type::UnsignedNorm:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Unorm;
+ return VK_FORMAT_R8_UNORM;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Unorm;
+ return VK_FORMAT_R8G8_UNORM;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Unorm;
+ return VK_FORMAT_R8G8B8_UNORM;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Unorm;
+ return VK_FORMAT_R8G8B8A8_UNORM;
case Maxwell::VertexAttribute::Size::Size_16:
- return vk::Format::eR16Unorm;
+ return VK_FORMAT_R16_UNORM;
case Maxwell::VertexAttribute::Size::Size_16_16:
- return vk::Format::eR16G16Unorm;
+ return VK_FORMAT_R16G16_UNORM;
case Maxwell::VertexAttribute::Size::Size_16_16_16:
- return vk::Format::eR16G16B16Unorm;
+ return VK_FORMAT_R16G16B16_UNORM;
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Unorm;
+ return VK_FORMAT_R16G16B16A16_UNORM;
case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
- return vk::Format::eA2B10G10R10UnormPack32;
+ return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
default:
break;
}
@@ -343,59 +346,69 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
case Maxwell::VertexAttribute::Type::SignedInt:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Sint;
+ return VK_FORMAT_R16G16B16A16_SINT;
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Sint;
+ return VK_FORMAT_R8_SINT;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Sint;
+ return VK_FORMAT_R8G8_SINT;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Sint;
+ return VK_FORMAT_R8G8B8_SINT;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Sint;
+ return VK_FORMAT_R8G8B8A8_SINT;
case Maxwell::VertexAttribute::Size::Size_32:
- return vk::Format::eR32Sint;
+ return VK_FORMAT_R32_SINT;
default:
break;
}
+ break;
case Maxwell::VertexAttribute::Type::UnsignedInt:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Uint;
+ return VK_FORMAT_R8_UINT;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Uint;
+ return VK_FORMAT_R8G8_UINT;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Uint;
+ return VK_FORMAT_R8G8B8_UINT;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Uint;
+ return VK_FORMAT_R8G8B8A8_UINT;
+ case Maxwell::VertexAttribute::Size::Size_16:
+ return VK_FORMAT_R16_UINT;
+ case Maxwell::VertexAttribute::Size::Size_16_16:
+ return VK_FORMAT_R16G16_UINT;
+ case Maxwell::VertexAttribute::Size::Size_16_16_16:
+ return VK_FORMAT_R16G16B16_UINT;
+ case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
+ return VK_FORMAT_R16G16B16A16_UINT;
case Maxwell::VertexAttribute::Size::Size_32:
- return vk::Format::eR32Uint;
+ return VK_FORMAT_R32_UINT;
case Maxwell::VertexAttribute::Size::Size_32_32:
- return vk::Format::eR32G32Uint;
+ return VK_FORMAT_R32G32_UINT;
case Maxwell::VertexAttribute::Size::Size_32_32_32:
- return vk::Format::eR32G32B32Uint;
+ return VK_FORMAT_R32G32B32_UINT;
case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
- return vk::Format::eR32G32B32A32Uint;
+ return VK_FORMAT_R32G32B32A32_UINT;
default:
break;
}
+ break;
case Maxwell::VertexAttribute::Type::UnsignedScaled:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Uscaled;
+ return VK_FORMAT_R8_USCALED;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Uscaled;
+ return VK_FORMAT_R8G8_USCALED;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Uscaled;
+ return VK_FORMAT_R8G8B8_USCALED;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Uscaled;
+ return VK_FORMAT_R8G8B8A8_USCALED;
case Maxwell::VertexAttribute::Size::Size_16:
- return vk::Format::eR16Uscaled;
+ return VK_FORMAT_R16_USCALED;
case Maxwell::VertexAttribute::Size::Size_16_16:
- return vk::Format::eR16G16Uscaled;
+ return VK_FORMAT_R16G16_USCALED;
case Maxwell::VertexAttribute::Size::Size_16_16_16:
- return vk::Format::eR16G16B16Uscaled;
+ return VK_FORMAT_R16G16B16_USCALED;
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Uscaled;
+ return VK_FORMAT_R16G16B16A16_USCALED;
default:
break;
}
@@ -403,21 +416,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
case Maxwell::VertexAttribute::Type::SignedScaled:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_8:
- return vk::Format::eR8Sscaled;
+ return VK_FORMAT_R8_SSCALED;
case Maxwell::VertexAttribute::Size::Size_8_8:
- return vk::Format::eR8G8Sscaled;
+ return VK_FORMAT_R8G8_SSCALED;
case Maxwell::VertexAttribute::Size::Size_8_8_8:
- return vk::Format::eR8G8B8Sscaled;
+ return VK_FORMAT_R8G8B8_SSCALED;
case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
- return vk::Format::eR8G8B8A8Sscaled;
+ return VK_FORMAT_R8G8B8A8_SSCALED;
case Maxwell::VertexAttribute::Size::Size_16:
- return vk::Format::eR16Sscaled;
+ return VK_FORMAT_R16_SSCALED;
case Maxwell::VertexAttribute::Size::Size_16_16:
- return vk::Format::eR16G16Sscaled;
+ return VK_FORMAT_R16G16_SSCALED;
case Maxwell::VertexAttribute::Size::Size_16_16_16:
- return vk::Format::eR16G16B16Sscaled;
+ return VK_FORMAT_R16G16B16_SSCALED;
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Sscaled;
+ return VK_FORMAT_R16G16B16A16_SSCALED;
default:
break;
}
@@ -425,21 +438,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
case Maxwell::VertexAttribute::Type::Float:
switch (size) {
case Maxwell::VertexAttribute::Size::Size_32:
- return vk::Format::eR32Sfloat;
+ return VK_FORMAT_R32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_32_32:
- return vk::Format::eR32G32Sfloat;
+ return VK_FORMAT_R32G32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_32_32_32:
- return vk::Format::eR32G32B32Sfloat;
+ return VK_FORMAT_R32G32B32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
- return vk::Format::eR32G32B32A32Sfloat;
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_16:
- return vk::Format::eR16Sfloat;
+ return VK_FORMAT_R16_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_16_16:
- return vk::Format::eR16G16Sfloat;
+ return VK_FORMAT_R16G16_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_16_16_16:
- return vk::Format::eR16G16B16Sfloat;
+ return VK_FORMAT_R16G16B16_SFLOAT;
case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
- return vk::Format::eR16G16B16A16Sfloat;
+ return VK_FORMAT_R16G16B16A16_SFLOAT;
default:
break;
}
@@ -450,210 +463,210 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
return {};
}
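The translated format typically lands in a VkVertexInputAttributeDescription when the fixed-function vertex state is built; a sketch, with attr and location as hypothetical stand-ins for the guest pipeline state:

    VkVertexInputAttributeDescription description;
    description.location = location;
    description.binding = attr.buffer;
    description.format = MaxwellToVK::VertexFormat(attr.type, attr.size);
    description.offset = attr.offset;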
-vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
+VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
switch (comparison) {
case Maxwell::ComparisonOp::Never:
case Maxwell::ComparisonOp::NeverOld:
- return vk::CompareOp::eNever;
+ return VK_COMPARE_OP_NEVER;
case Maxwell::ComparisonOp::Less:
case Maxwell::ComparisonOp::LessOld:
- return vk::CompareOp::eLess;
+ return VK_COMPARE_OP_LESS;
case Maxwell::ComparisonOp::Equal:
case Maxwell::ComparisonOp::EqualOld:
- return vk::CompareOp::eEqual;
+ return VK_COMPARE_OP_EQUAL;
case Maxwell::ComparisonOp::LessEqual:
case Maxwell::ComparisonOp::LessEqualOld:
- return vk::CompareOp::eLessOrEqual;
+ return VK_COMPARE_OP_LESS_OR_EQUAL;
case Maxwell::ComparisonOp::Greater:
case Maxwell::ComparisonOp::GreaterOld:
- return vk::CompareOp::eGreater;
+ return VK_COMPARE_OP_GREATER;
case Maxwell::ComparisonOp::NotEqual:
case Maxwell::ComparisonOp::NotEqualOld:
- return vk::CompareOp::eNotEqual;
+ return VK_COMPARE_OP_NOT_EQUAL;
case Maxwell::ComparisonOp::GreaterEqual:
case Maxwell::ComparisonOp::GreaterEqualOld:
- return vk::CompareOp::eGreaterOrEqual;
+ return VK_COMPARE_OP_GREATER_OR_EQUAL;
case Maxwell::ComparisonOp::Always:
case Maxwell::ComparisonOp::AlwaysOld:
- return vk::CompareOp::eAlways;
+ return VK_COMPARE_OP_ALWAYS;
}
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison));
return {};
}
-vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
+VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
switch (index_format) {
case Maxwell::IndexFormat::UnsignedByte:
if (!device.IsExtIndexTypeUint8Supported()) {
UNIMPLEMENTED_MSG("Native uint8 indices are not supported on this device");
- return vk::IndexType::eUint16;
+ return VK_INDEX_TYPE_UINT16;
}
- return vk::IndexType::eUint8EXT;
+ return VK_INDEX_TYPE_UINT8_EXT;
case Maxwell::IndexFormat::UnsignedShort:
- return vk::IndexType::eUint16;
+ return VK_INDEX_TYPE_UINT16;
case Maxwell::IndexFormat::UnsignedInt:
- return vk::IndexType::eUint32;
+ return VK_INDEX_TYPE_UINT32;
}
UNIMPLEMENTED_MSG("Unimplemented index_format={}", static_cast<u32>(index_format));
return {};
}
-vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op) {
+VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) {
switch (stencil_op) {
case Maxwell::StencilOp::Keep:
case Maxwell::StencilOp::KeepOGL:
- return vk::StencilOp::eKeep;
+ return VK_STENCIL_OP_KEEP;
case Maxwell::StencilOp::Zero:
case Maxwell::StencilOp::ZeroOGL:
- return vk::StencilOp::eZero;
+ return VK_STENCIL_OP_ZERO;
case Maxwell::StencilOp::Replace:
case Maxwell::StencilOp::ReplaceOGL:
- return vk::StencilOp::eReplace;
+ return VK_STENCIL_OP_REPLACE;
case Maxwell::StencilOp::Incr:
case Maxwell::StencilOp::IncrOGL:
- return vk::StencilOp::eIncrementAndClamp;
+ return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
case Maxwell::StencilOp::Decr:
case Maxwell::StencilOp::DecrOGL:
- return vk::StencilOp::eDecrementAndClamp;
+ return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
case Maxwell::StencilOp::Invert:
case Maxwell::StencilOp::InvertOGL:
- return vk::StencilOp::eInvert;
+ return VK_STENCIL_OP_INVERT;
case Maxwell::StencilOp::IncrWrap:
case Maxwell::StencilOp::IncrWrapOGL:
- return vk::StencilOp::eIncrementAndWrap;
+ return VK_STENCIL_OP_INCREMENT_AND_WRAP;
case Maxwell::StencilOp::DecrWrap:
case Maxwell::StencilOp::DecrWrapOGL:
- return vk::StencilOp::eDecrementAndWrap;
+ return VK_STENCIL_OP_DECREMENT_AND_WRAP;
}
UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil_op));
return {};
}
-vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation) {
+VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
switch (equation) {
case Maxwell::Blend::Equation::Add:
case Maxwell::Blend::Equation::AddGL:
- return vk::BlendOp::eAdd;
+ return VK_BLEND_OP_ADD;
case Maxwell::Blend::Equation::Subtract:
case Maxwell::Blend::Equation::SubtractGL:
- return vk::BlendOp::eSubtract;
+ return VK_BLEND_OP_SUBTRACT;
case Maxwell::Blend::Equation::ReverseSubtract:
case Maxwell::Blend::Equation::ReverseSubtractGL:
- return vk::BlendOp::eReverseSubtract;
+ return VK_BLEND_OP_REVERSE_SUBTRACT;
case Maxwell::Blend::Equation::Min:
case Maxwell::Blend::Equation::MinGL:
- return vk::BlendOp::eMin;
+ return VK_BLEND_OP_MIN;
case Maxwell::Blend::Equation::Max:
case Maxwell::Blend::Equation::MaxGL:
- return vk::BlendOp::eMax;
+ return VK_BLEND_OP_MAX;
}
UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation));
return {};
}
-vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
+VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
switch (factor) {
case Maxwell::Blend::Factor::Zero:
case Maxwell::Blend::Factor::ZeroGL:
- return vk::BlendFactor::eZero;
+ return VK_BLEND_FACTOR_ZERO;
case Maxwell::Blend::Factor::One:
case Maxwell::Blend::Factor::OneGL:
- return vk::BlendFactor::eOne;
+ return VK_BLEND_FACTOR_ONE;
case Maxwell::Blend::Factor::SourceColor:
case Maxwell::Blend::Factor::SourceColorGL:
- return vk::BlendFactor::eSrcColor;
+ return VK_BLEND_FACTOR_SRC_COLOR;
case Maxwell::Blend::Factor::OneMinusSourceColor:
case Maxwell::Blend::Factor::OneMinusSourceColorGL:
- return vk::BlendFactor::eOneMinusSrcColor;
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
case Maxwell::Blend::Factor::SourceAlpha:
case Maxwell::Blend::Factor::SourceAlphaGL:
- return vk::BlendFactor::eSrcAlpha;
+ return VK_BLEND_FACTOR_SRC_ALPHA;
case Maxwell::Blend::Factor::OneMinusSourceAlpha:
case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
- return vk::BlendFactor::eOneMinusSrcAlpha;
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case Maxwell::Blend::Factor::DestAlpha:
case Maxwell::Blend::Factor::DestAlphaGL:
- return vk::BlendFactor::eDstAlpha;
+ return VK_BLEND_FACTOR_DST_ALPHA;
case Maxwell::Blend::Factor::OneMinusDestAlpha:
case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
- return vk::BlendFactor::eOneMinusDstAlpha;
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
case Maxwell::Blend::Factor::DestColor:
case Maxwell::Blend::Factor::DestColorGL:
- return vk::BlendFactor::eDstColor;
+ return VK_BLEND_FACTOR_DST_COLOR;
case Maxwell::Blend::Factor::OneMinusDestColor:
case Maxwell::Blend::Factor::OneMinusDestColorGL:
- return vk::BlendFactor::eOneMinusDstColor;
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
case Maxwell::Blend::Factor::SourceAlphaSaturate:
case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
- return vk::BlendFactor::eSrcAlphaSaturate;
+ return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
case Maxwell::Blend::Factor::Source1Color:
case Maxwell::Blend::Factor::Source1ColorGL:
- return vk::BlendFactor::eSrc1Color;
+ return VK_BLEND_FACTOR_SRC1_COLOR;
case Maxwell::Blend::Factor::OneMinusSource1Color:
case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
- return vk::BlendFactor::eOneMinusSrc1Color;
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
case Maxwell::Blend::Factor::Source1Alpha:
case Maxwell::Blend::Factor::Source1AlphaGL:
- return vk::BlendFactor::eSrc1Alpha;
+ return VK_BLEND_FACTOR_SRC1_ALPHA;
case Maxwell::Blend::Factor::OneMinusSource1Alpha:
case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
- return vk::BlendFactor::eOneMinusSrc1Alpha;
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
case Maxwell::Blend::Factor::ConstantColor:
case Maxwell::Blend::Factor::ConstantColorGL:
- return vk::BlendFactor::eConstantColor;
+ return VK_BLEND_FACTOR_CONSTANT_COLOR;
case Maxwell::Blend::Factor::OneMinusConstantColor:
case Maxwell::Blend::Factor::OneMinusConstantColorGL:
- return vk::BlendFactor::eOneMinusConstantColor;
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
case Maxwell::Blend::Factor::ConstantAlpha:
case Maxwell::Blend::Factor::ConstantAlphaGL:
- return vk::BlendFactor::eConstantAlpha;
+ return VK_BLEND_FACTOR_CONSTANT_ALPHA;
case Maxwell::Blend::Factor::OneMinusConstantAlpha:
case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
- return vk::BlendFactor::eOneMinusConstantAlpha;
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
}
UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor));
return {};
}
-vk::FrontFace FrontFace(Maxwell::FrontFace front_face) {
+VkFrontFace FrontFace(Maxwell::FrontFace front_face) {
switch (front_face) {
case Maxwell::FrontFace::ClockWise:
- return vk::FrontFace::eClockwise;
+ return VK_FRONT_FACE_CLOCKWISE;
case Maxwell::FrontFace::CounterClockWise:
- return vk::FrontFace::eCounterClockwise;
+ return VK_FRONT_FACE_COUNTER_CLOCKWISE;
}
UNIMPLEMENTED_MSG("Unimplemented front face={}", static_cast<u32>(front_face));
return {};
}
-vk::CullModeFlags CullFace(Maxwell::CullFace cull_face) {
+VkCullModeFlags CullFace(Maxwell::CullFace cull_face) {
switch (cull_face) {
case Maxwell::CullFace::Front:
- return vk::CullModeFlagBits::eFront;
+ return VK_CULL_MODE_FRONT_BIT;
case Maxwell::CullFace::Back:
- return vk::CullModeFlagBits::eBack;
+ return VK_CULL_MODE_BACK_BIT;
case Maxwell::CullFace::FrontAndBack:
- return vk::CullModeFlagBits::eFrontAndBack;
+ return VK_CULL_MODE_FRONT_AND_BACK;
}
UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face));
return {};
}
-vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
+VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
switch (swizzle) {
case Tegra::Texture::SwizzleSource::Zero:
- return vk::ComponentSwizzle::eZero;
+ return VK_COMPONENT_SWIZZLE_ZERO;
case Tegra::Texture::SwizzleSource::R:
- return vk::ComponentSwizzle::eR;
+ return VK_COMPONENT_SWIZZLE_R;
case Tegra::Texture::SwizzleSource::G:
- return vk::ComponentSwizzle::eG;
+ return VK_COMPONENT_SWIZZLE_G;
case Tegra::Texture::SwizzleSource::B:
- return vk::ComponentSwizzle::eB;
+ return VK_COMPONENT_SWIZZLE_B;
case Tegra::Texture::SwizzleSource::A:
- return vk::ComponentSwizzle::eA;
+ return VK_COMPONENT_SWIZZLE_A;
case Tegra::Texture::SwizzleSource::OneInt:
case Tegra::Texture::SwizzleSource::OneFloat:
- return vk::ComponentSwizzle::eOne;
+ return VK_COMPONENT_SWIZZLE_ONE;
}
UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle));
return {};
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index 24f6ab544..81bce4c6c 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -6,8 +6,8 @@
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
#include "video_core/textures/texture.h"
@@ -18,46 +18,45 @@ using PixelFormat = VideoCore::Surface::PixelFormat;
namespace Sampler {
-vk::Filter Filter(Tegra::Texture::TextureFilter filter);
+VkFilter Filter(Tegra::Texture::TextureFilter filter);
-vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
+VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
-vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
- Tegra::Texture::TextureFilter filter);
+VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+ Tegra::Texture::TextureFilter filter);
-vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
+VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
} // namespace Sampler
struct FormatInfo {
- vk::Format format;
+ VkFormat format;
bool attachable;
bool storage;
};
FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format);
-vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
+VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
-vk::PrimitiveTopology PrimitiveTopology(const VKDevice& device,
- Maxwell::PrimitiveTopology topology);
+VkPrimitiveTopology PrimitiveTopology(const VKDevice& device, Maxwell::PrimitiveTopology topology);
-vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
+VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
-vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
+VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
-vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
+VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
-vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op);
+VkStencilOp StencilOp(Maxwell::StencilOp stencil_op);
-vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation);
+VkBlendOp BlendEquation(Maxwell::Blend::Equation equation);
-vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor);
+VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor);
-vk::FrontFace FrontFace(Maxwell::FrontFace front_face);
+VkFrontFace FrontFace(Maxwell::FrontFace front_face);
-vk::CullModeFlags CullFace(Maxwell::CullFace cull_face);
+VkCullModeFlags CullFace(Maxwell::CullFace cull_face);
-vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
+VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
} // namespace Vulkan::MaxwellToVK
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 6953aaafe..dd590c38b 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -2,13 +2,18 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <algorithm>
+#include <array>
+#include <cstring>
#include <memory>
#include <optional>
+#include <string>
#include <vector>
#include <fmt/format.h>
#include "common/assert.h"
+#include "common/dynamic_library.h"
#include "common/logging/log.h"
#include "common/telemetry.h"
#include "core/core.h"
@@ -19,7 +24,6 @@
#include "core/settings.h"
#include "core/telemetry_session.h"
#include "video_core/gpu.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
#include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -29,30 +33,145 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"
+
+// Include these late to avoid polluting previous headers
+#ifdef _WIN32
+#include <windows.h>
+// ensure windows.h is included before the Vulkan platform header
+#include <vulkan/vulkan_win32.h>
+#endif
+
+#ifdef __linux__
+#include <X11/Xlib.h>
+#include <vulkan/vulkan_wayland.h>
+#include <vulkan/vulkan_xlib.h>
+#endif
namespace Vulkan {
namespace {
-VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity_,
+using Core::Frontend::WindowSystemType;
+
+VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
VkDebugUtilsMessageTypeFlagsEXT type,
const VkDebugUtilsMessengerCallbackDataEXT* data,
[[maybe_unused]] void* user_data) {
- const vk::DebugUtilsMessageSeverityFlagBitsEXT severity{severity_};
const char* message{data->pMessage};
- if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eError) {
+ if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
LOG_CRITICAL(Render_Vulkan, "{}", message);
- } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) {
+ } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
LOG_WARNING(Render_Vulkan, "{}", message);
- } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo) {
+ } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
LOG_INFO(Render_Vulkan, "{}", message);
- } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) {
+ } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
LOG_DEBUG(Render_Vulkan, "{}", message);
}
return VK_FALSE;
}
+Common::DynamicLibrary OpenVulkanLibrary() {
+ Common::DynamicLibrary library;
+#ifdef __APPLE__
+ // Check if a path to a specific Vulkan library has been specified.
+ char* libvulkan_env = getenv("LIBVULKAN_PATH");
+ if (!libvulkan_env || !library.Open(libvulkan_env)) {
+ // Use the libvulkan.dylib from the application bundle.
+ std::string filename = File::GetBundleDirectory() + "/Contents/Frameworks/libvulkan.dylib";
+ library.Open(filename.c_str());
+ }
+#else
+ std::string filename = Common::DynamicLibrary::GetVersionedFilename("vulkan", 1);
+ if (!library.Open(filename.c_str())) {
+ // Android devices may not have libvulkan.so.1, only libvulkan.so.
+ filename = Common::DynamicLibrary::GetVersionedFilename("vulkan");
+ library.Open(filename.c_str());
+ }
+#endif
+ return library;
+}
+
+vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatch& dld,
+ WindowSystemType window_type = WindowSystemType::Headless,
+ bool enable_layers = false) {
+ if (!library.IsOpen()) {
+ LOG_ERROR(Render_Vulkan, "Vulkan library not available");
+ return {};
+ }
+ if (!library.GetSymbol("vkGetInstanceProcAddr", &dld.vkGetInstanceProcAddr)) {
+ LOG_ERROR(Render_Vulkan, "vkGetInstanceProcAddr not present in Vulkan");
+ return {};
+ }
+ if (!vk::Load(dld)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Vulkan function pointers");
+ return {};
+ }
+
+ std::vector<const char*> extensions;
+ extensions.reserve(6);
+ switch (window_type) {
+ case Core::Frontend::WindowSystemType::Headless:
+ break;
+#ifdef _WIN32
+ case Core::Frontend::WindowSystemType::Windows:
+ extensions.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ break;
+#endif
+#ifdef __linux__
+ case Core::Frontend::WindowSystemType::X11:
+ extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
+ break;
+ case Core::Frontend::WindowSystemType::Wayland:
+ extensions.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
+ break;
+#endif
+ default:
+ LOG_ERROR(Render_Vulkan, "Presentation not supported on this platform");
+ break;
+ }
+ if (window_type != Core::Frontend::WindowSystemType::Headless) {
+ extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
+ }
+ if (enable_layers) {
+ extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+ }
+ extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
+
+ const std::optional properties = vk::EnumerateInstanceExtensionProperties(dld);
+ if (!properties) {
+ LOG_ERROR(Render_Vulkan, "Failed to query extension properties");
+ return {};
+ }
+
+ for (const char* extension : extensions) {
+ const auto it =
+ std::find_if(properties->begin(), properties->end(), [extension](const auto& prop) {
+ return !std::strcmp(extension, prop.extensionName);
+ });
+ if (it == properties->end()) {
+ LOG_ERROR(Render_Vulkan, "Required instance extension {} is not available", extension);
+ return {};
+ }
+ }
+
+ static constexpr std::array layers_data{"VK_LAYER_LUNARG_standard_validation"};
+ vk::Span<const char*> layers = layers_data;
+ if (!enable_layers) {
+ layers = {};
+ }
+ vk::Instance instance = vk::Instance::Create(layers, extensions, dld);
+ if (!instance) {
+ LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance");
+ return {};
+ }
+ if (!vk::Load(*instance, dld)) {
+ LOG_ERROR(Render_Vulkan, "Failed to load Vulkan instance function pointers");
+ }
+ return instance;
+}
+
std::string GetReadableVersion(u32 version) {
return fmt::format("{}.{}.{}", VK_VERSION_MAJOR(version), VK_VERSION_MINOR(version),
VK_VERSION_PATCH(version));
@@ -63,14 +182,14 @@ std::string GetDriverVersion(const VKDevice& device) {
// https://github.com/SaschaWillems/vulkan.gpuinfo.org/blob/5dddea46ea1120b0df14eef8f15ff8e318e35462/functions.php#L308-L314
const u32 version = device.GetDriverVersion();
- if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
+ if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
const u32 major = (version >> 22) & 0x3ff;
const u32 minor = (version >> 14) & 0x0ff;
const u32 secondary = (version >> 6) & 0x0ff;
const u32 tertiary = version & 0x003f;
return fmt::format("{}.{}.{}.{}", major, minor, secondary, tertiary);
}
- if (device.GetDriverID() == vk::DriverIdKHR::eIntelProprietaryWindows) {
+ if (device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
const u32 major = version >> 14;
const u32 minor = version & 0x3fff;
return fmt::format("{}.{}", major, minor);
@@ -147,27 +266,12 @@ bool RendererVulkan::TryPresent(int /*timeout_ms*/) {
}
bool RendererVulkan::Init() {
- PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr{};
- render_window.RetrieveVulkanHandlers(&vkGetInstanceProcAddr, &instance, &surface);
- const vk::DispatchLoaderDynamic dldi(instance, vkGetInstanceProcAddr);
-
- std::optional<vk::DebugUtilsMessengerEXT> callback;
- if (Settings::values.renderer_debug && dldi.vkCreateDebugUtilsMessengerEXT) {
- callback = CreateDebugCallback(dldi);
- if (!callback) {
- return false;
- }
- }
-
- if (!PickDevices(dldi)) {
- if (callback) {
- instance.destroy(*callback, nullptr, dldi);
- }
+ library = OpenVulkanLibrary();
+ instance = CreateInstance(library, dld, render_window.GetWindowInfo().type,
+ Settings::values.renderer_debug);
+ if (!instance || !CreateDebugCallback() || !CreateSurface() || !PickDevices()) {
return false;
}
- debug_callback = UniqueDebugUtilsMessengerEXT(
- *callback, vk::ObjectDestroy<vk::Instance, vk::DispatchLoaderDynamic>(
- instance, nullptr, device->GetDispatchLoader()));
Report();
@@ -176,7 +280,7 @@ bool RendererVulkan::Init() {
resource_manager = std::make_unique<VKResourceManager>(*device);
const auto& framebuffer = render_window.GetFramebufferLayout();
- swapchain = std::make_unique<VKSwapchain>(surface, *device);
+ swapchain = std::make_unique<VKSwapchain>(*surface, *device);
swapchain->Create(framebuffer.width, framebuffer.height, false);
state_tracker = std::make_unique<StateTracker>(system);
@@ -198,10 +302,8 @@ void RendererVulkan::ShutDown() {
if (!device) {
return;
}
- const auto dev = device->GetLogical();
- const auto& dld = device->GetDispatchLoader();
- if (dev && dld.vkDeviceWaitIdle) {
- dev.waitIdle(dld);
+ if (const auto& dev = device->GetLogical()) {
+ dev.WaitIdle();
}
rasterizer.reset();
@@ -213,44 +315,94 @@ void RendererVulkan::ShutDown() {
device.reset();
}
-std::optional<vk::DebugUtilsMessengerEXT> RendererVulkan::CreateDebugCallback(
- const vk::DispatchLoaderDynamic& dldi) {
- const vk::DebugUtilsMessengerCreateInfoEXT callback_ci(
- {},
- vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
- vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
- vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo |
- vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose,
- vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral |
- vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation |
- vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance,
- &DebugCallback, nullptr);
- vk::DebugUtilsMessengerEXT callback;
- if (instance.createDebugUtilsMessengerEXT(&callback_ci, nullptr, &callback, dldi) !=
- vk::Result::eSuccess) {
+bool RendererVulkan::CreateDebugCallback() {
+ if (!Settings::values.renderer_debug) {
+ return true;
+ }
+ debug_callback = instance.TryCreateDebugCallback(DebugCallback);
+ if (!debug_callback) {
LOG_ERROR(Render_Vulkan, "Failed to create debug callback");
- return {};
+ return false;
}
- return callback;
+ return true;
}
-bool RendererVulkan::PickDevices(const vk::DispatchLoaderDynamic& dldi) {
- const auto devices = instance.enumeratePhysicalDevices(dldi);
+bool RendererVulkan::CreateSurface() {
+ [[maybe_unused]] const auto& window_info = render_window.GetWindowInfo();
+ VkSurfaceKHR unsafe_surface = nullptr;
+
+#ifdef _WIN32
+ if (window_info.type == Core::Frontend::WindowSystemType::Windows) {
+ const HWND hWnd = static_cast<HWND>(window_info.render_surface);
+        const HINSTANCE hInstance = GetModuleHandle(nullptr);
+        const VkWin32SurfaceCreateInfoKHR win32_ci{VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
+                                                   nullptr, 0, hInstance, hWnd};
+ const auto vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
+ dld.vkGetInstanceProcAddr(*instance, "vkCreateWin32SurfaceKHR"));
+ if (!vkCreateWin32SurfaceKHR ||
+ vkCreateWin32SurfaceKHR(*instance, &win32_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
+ LOG_ERROR(Render_Vulkan, "Failed to initialize Win32 surface");
+ return false;
+ }
+ }
+#endif
+#ifdef __linux__
+ if (window_info.type == Core::Frontend::WindowSystemType::X11) {
+ const VkXlibSurfaceCreateInfoKHR xlib_ci{
+ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, nullptr, 0,
+ static_cast<Display*>(window_info.display_connection),
+ reinterpret_cast<Window>(window_info.render_surface)};
+ const auto vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
+ dld.vkGetInstanceProcAddr(*instance, "vkCreateXlibSurfaceKHR"));
+ if (!vkCreateXlibSurfaceKHR ||
+ vkCreateXlibSurfaceKHR(*instance, &xlib_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
+ LOG_ERROR(Render_Vulkan, "Failed to initialize Xlib surface");
+ return false;
+ }
+ }
+ if (window_info.type == Core::Frontend::WindowSystemType::Wayland) {
+ const VkWaylandSurfaceCreateInfoKHR wayland_ci{
+ VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, nullptr, 0,
+ static_cast<wl_display*>(window_info.display_connection),
+ static_cast<wl_surface*>(window_info.render_surface)};
+ const auto vkCreateWaylandSurfaceKHR = reinterpret_cast<PFN_vkCreateWaylandSurfaceKHR>(
+ dld.vkGetInstanceProcAddr(*instance, "vkCreateWaylandSurfaceKHR"));
+ if (!vkCreateWaylandSurfaceKHR ||
+ vkCreateWaylandSurfaceKHR(*instance, &wayland_ci, nullptr, &unsafe_surface) !=
+ VK_SUCCESS) {
+ LOG_ERROR(Render_Vulkan, "Failed to initialize Wayland surface");
+ return false;
+ }
+ }
+#endif
+ if (!unsafe_surface) {
+ LOG_ERROR(Render_Vulkan, "Presentation not supported on this platform");
+ return false;
+ }
+
+ surface = vk::SurfaceKHR(unsafe_surface, *instance, dld);
+ return true;
+}
+
+bool RendererVulkan::PickDevices() {
+ const auto devices = instance.EnumeratePhysicalDevices();
+ if (!devices) {
+ LOG_ERROR(Render_Vulkan, "Failed to enumerate physical devices");
+ return false;
+ }
- // TODO(Rodrigo): Choose device from config file
const s32 device_index = Settings::values.vulkan_device;
- if (device_index < 0 || device_index >= static_cast<s32>(devices.size())) {
+ if (device_index < 0 || device_index >= static_cast<s32>(devices->size())) {
LOG_ERROR(Render_Vulkan, "Invalid device index {}!", device_index);
return false;
}
- const vk::PhysicalDevice physical_device = devices[device_index];
-
- if (!VKDevice::IsSuitable(dldi, physical_device, surface)) {
+ const vk::PhysicalDevice physical_device((*devices)[static_cast<std::size_t>(device_index)],
+ dld);
+ if (!VKDevice::IsSuitable(physical_device, *surface)) {
return false;
}
- device = std::make_unique<VKDevice>(dldi, physical_device, surface);
- return device->Create(dldi, instance);
+ device = std::make_unique<VKDevice>(*instance, physical_device, *surface, dld);
+ return device->Create();
}
void RendererVulkan::Report() const {
@@ -276,4 +428,25 @@ void RendererVulkan::Report() const {
telemetry_session.AddField(field, "GPU_Vulkan_Extensions", extensions);
}
+std::vector<std::string> RendererVulkan::EnumerateDevices() {
+ vk::InstanceDispatch dld;
+ Common::DynamicLibrary library = OpenVulkanLibrary();
+ vk::Instance instance = CreateInstance(library, dld);
+ if (!instance) {
+ return {};
+ }
+
+ const std::optional physical_devices = instance.EnumeratePhysicalDevices();
+ if (!physical_devices) {
+ return {};
+ }
+
+ std::vector<std::string> names;
+ names.reserve(physical_devices->size());
+ for (const auto& device : *physical_devices) {
+ names.push_back(vk::PhysicalDevice(device, dld).GetProperties().deviceName);
+ }
+ return names;
+}
+
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index d14384e79..18270909b 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -6,10 +6,13 @@
#include <memory>
#include <optional>
+#include <string>
#include <vector>
+#include "common/dynamic_library.h"
+
#include "video_core/renderer_base.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
class System;
@@ -44,22 +47,28 @@ public:
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override;
bool TryPresent(int timeout_ms) override;
+ static std::vector<std::string> EnumerateDevices();
+
private:
- std::optional<vk::DebugUtilsMessengerEXT> CreateDebugCallback(
- const vk::DispatchLoaderDynamic& dldi);
+ bool CreateDebugCallback();
- bool PickDevices(const vk::DispatchLoaderDynamic& dldi);
+ bool CreateSurface();
+
+ bool PickDevices();
void Report() const;
Core::System& system;
+ Common::DynamicLibrary library;
+ vk::InstanceDispatch dld;
+
vk::Instance instance;
vk::SurfaceKHR surface;
VKScreenInfo screen_info;
- UniqueDebugUtilsMessengerEXT debug_callback;
+ vk::DebugCallback debug_callback;
std::unique_ptr<VKDevice> device;
std::unique_ptr<VKSwapchain> swapchain;
std::unique_ptr<VKMemoryManager> memory_manager;
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 855cfc883..fbd406f2b 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -20,7 +20,6 @@
#include "video_core/gpu.h"
#include "video_core/morton.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
#include "video_core/renderer_vulkan/vk_blit_screen.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -30,6 +29,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
namespace Vulkan {
@@ -140,16 +140,25 @@ struct ScreenRectVertex {
std::array<f32, 2> position;
std::array<f32, 2> tex_coord;
- static vk::VertexInputBindingDescription GetDescription() {
- return vk::VertexInputBindingDescription(0, sizeof(ScreenRectVertex),
- vk::VertexInputRate::eVertex);
+ static VkVertexInputBindingDescription GetDescription() {
+ VkVertexInputBindingDescription description;
+ description.binding = 0;
+ description.stride = sizeof(ScreenRectVertex);
+ description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+ return description;
}
- static std::array<vk::VertexInputAttributeDescription, 2> GetAttributes() {
- return {vk::VertexInputAttributeDescription(0, 0, vk::Format::eR32G32Sfloat,
- offsetof(ScreenRectVertex, position)),
- vk::VertexInputAttributeDescription(1, 0, vk::Format::eR32G32Sfloat,
- offsetof(ScreenRectVertex, tex_coord))};
+ static std::array<VkVertexInputAttributeDescription, 2> GetAttributes() {
+ std::array<VkVertexInputAttributeDescription, 2> attributes;
+ attributes[0].location = 0;
+ attributes[0].binding = 0;
+ attributes[0].format = VK_FORMAT_R32G32_SFLOAT;
+ attributes[0].offset = offsetof(ScreenRectVertex, position);
+ attributes[1].location = 1;
+ attributes[1].binding = 0;
+ attributes[1].format = VK_FORMAT_R32G32_SFLOAT;
+ attributes[1].offset = offsetof(ScreenRectVertex, tex_coord);
+ return attributes;
}
};
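These two helpers feed the blit pipeline's vertex input state; a minimal sketch of the wiring (the real create-info code lives in a later hunk of this file):

    const auto binding = ScreenRectVertex::GetDescription();
    const auto attributes = ScreenRectVertex::GetAttributes();

    VkPipelineVertexInputStateCreateInfo vertex_input{};
    vertex_input.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vertex_input.vertexBindingDescriptionCount = 1;
    vertex_input.pVertexBindingDescriptions = &binding;
    vertex_input.vertexAttributeDescriptionCount = static_cast<u32>(attributes.size());
    vertex_input.pVertexAttributeDescriptions = attributes.data();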
@@ -172,16 +181,16 @@ std::size_t GetSizeInBytes(const Tegra::FramebufferConfig& framebuffer) {
static_cast<std::size_t>(framebuffer.height) * GetBytesPerPixel(framebuffer);
}
-vk::Format GetFormat(const Tegra::FramebufferConfig& framebuffer) {
+VkFormat GetFormat(const Tegra::FramebufferConfig& framebuffer) {
switch (framebuffer.pixel_format) {
case Tegra::FramebufferConfig::PixelFormat::ABGR8:
- return vk::Format::eA8B8G8R8UnormPack32;
+ return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
case Tegra::FramebufferConfig::PixelFormat::RGB565:
- return vk::Format::eR5G6B5UnormPack16;
+ return VK_FORMAT_R5G6B5_UNORM_PACK16;
default:
UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}",
static_cast<u32>(framebuffer.pixel_format));
- return vk::Format::eA8B8G8R8UnormPack32;
+ return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
}
}
@@ -219,8 +228,8 @@ void VKBlitScreen::Recreate() {
CreateDynamicResources();
}
-std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
- bool use_accelerated) {
+std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
+ bool use_accelerated) {
RefreshResources(framebuffer);
// Finish any pending renderpass
@@ -255,46 +264,76 @@ std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferC
framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1,
map.GetAddress() + image_offset, host_ptr);
- blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferWrite,
- vk::ImageLayout::eTransferDstOptimal);
-
- const vk::BufferImageCopy copy(image_offset, 0, 0,
- {vk::ImageAspectFlagBits::eColor, 0, 0, 1}, {0, 0, 0},
- {framebuffer.width, framebuffer.height, 1});
- scheduler.Record([buffer_handle = *buffer, image = blit_image->GetHandle(),
- copy](auto cmdbuf, auto& dld) {
- cmdbuf.copyBufferToImage(buffer_handle, image, vk::ImageLayout::eTransferDstOptimal,
- {copy}, dld);
- });
+ blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+ VkBufferImageCopy copy;
+ copy.bufferOffset = image_offset;
+ copy.bufferRowLength = 0;
+ copy.bufferImageHeight = 0;
+ copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ copy.imageSubresource.mipLevel = 0;
+ copy.imageSubresource.baseArrayLayer = 0;
+ copy.imageSubresource.layerCount = 1;
+ copy.imageOffset.x = 0;
+ copy.imageOffset.y = 0;
+ copy.imageOffset.z = 0;
+ copy.imageExtent.width = framebuffer.width;
+ copy.imageExtent.height = framebuffer.height;
+ copy.imageExtent.depth = 1;
+ scheduler.Record(
+ [buffer = *buffer, image = *blit_image->GetHandle(), copy](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
+ });
}
map.Release();
- blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eFragmentShader,
- vk::AccessFlagBits::eShaderRead,
- vk::ImageLayout::eShaderReadOnlyOptimal);
+ blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
descriptor_set = descriptor_sets[image_index], buffer = *buffer,
size = swapchain.GetSize(), pipeline = *pipeline,
- layout = *pipeline_layout](auto cmdbuf, auto& dld) {
- const vk::ClearValue clear_color{std::array{0.0f, 0.0f, 0.0f, 1.0f}};
- const vk::RenderPassBeginInfo renderpass_bi(renderpass, framebuffer, {{0, 0}, size}, 1,
- &clear_color);
-
- cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld);
- cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld);
- cmdbuf.setViewport(
- 0,
- {{0.0f, 0.0f, static_cast<f32>(size.width), static_cast<f32>(size.height), 0.0f, 1.0f}},
- dld);
- cmdbuf.setScissor(0, {{{0, 0}, size}}, dld);
-
- cmdbuf.bindVertexBuffers(0, {buffer}, {offsetof(BufferData, vertices)}, dld);
- cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, layout, 0, {descriptor_set}, {},
- dld);
- cmdbuf.draw(4, 1, 0, 0, dld);
- cmdbuf.endRenderPass(dld);
+ layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
+ VkClearValue clear_color;
+ clear_color.color.float32[0] = 0.0f;
+ clear_color.color.float32[1] = 0.0f;
+ clear_color.color.float32[2] = 0.0f;
+        clear_color.color.float32[3] = 1.0f;
+
+ VkRenderPassBeginInfo renderpass_bi;
+ renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ renderpass_bi.pNext = nullptr;
+ renderpass_bi.renderPass = renderpass;
+ renderpass_bi.framebuffer = framebuffer;
+ renderpass_bi.renderArea.offset.x = 0;
+ renderpass_bi.renderArea.offset.y = 0;
+ renderpass_bi.renderArea.extent = size;
+ renderpass_bi.clearValueCount = 1;
+ renderpass_bi.pClearValues = &clear_color;
+
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+        viewport.width = static_cast<f32>(size.width);
+        viewport.height = static_cast<f32>(size.height);
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+
+ VkRect2D scissor;
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ scissor.extent = size;
+
+ cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ cmdbuf.SetViewport(0, viewport);
+ cmdbuf.SetScissor(0, scissor);
+
+ cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
+ cmdbuf.Draw(4, 1, 0, 0);
+ cmdbuf.EndRenderPass();
});
return {scheduler.GetFence(), *semaphores[image_index]};
@@ -334,165 +373,297 @@ void VKBlitScreen::CreateShaders() {
}
void VKBlitScreen::CreateSemaphores() {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
-
semaphores.resize(image_count);
- for (std::size_t i = 0; i < image_count; ++i) {
- semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld);
- }
+ std::generate(semaphores.begin(), semaphores.end(),
+ [this] { return device.GetLogical().CreateSemaphore(); });
}
void VKBlitScreen::CreateDescriptorPool() {
- const std::array<vk::DescriptorPoolSize, 2> pool_sizes{
- vk::DescriptorPoolSize{vk::DescriptorType::eUniformBuffer, static_cast<u32>(image_count)},
- vk::DescriptorPoolSize{vk::DescriptorType::eCombinedImageSampler,
- static_cast<u32>(image_count)}};
- const vk::DescriptorPoolCreateInfo pool_ci(
- {}, static_cast<u32>(image_count), static_cast<u32>(pool_sizes.size()), pool_sizes.data());
- const auto dev = device.GetLogical();
- descriptor_pool = dev.createDescriptorPoolUnique(pool_ci, nullptr, device.GetDispatchLoader());
+ std::array<VkDescriptorPoolSize, 2> pool_sizes;
+ pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ pool_sizes[0].descriptorCount = static_cast<u32>(image_count);
+ pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ pool_sizes[1].descriptorCount = static_cast<u32>(image_count);
+
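+ // FREE_DESCRIPTOR_SET_BIT lets individual sets be returned to the pool when they are destroyed.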
+ VkDescriptorPoolCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+ ci.maxSets = static_cast<u32>(image_count);
+ ci.poolSizeCount = static_cast<u32>(pool_sizes.size());
+ ci.pPoolSizes = pool_sizes.data();
+ descriptor_pool = device.GetLogical().CreateDescriptorPool(ci);
}
void VKBlitScreen::CreateRenderPass() {
- const vk::AttachmentDescription color_attachment(
- {}, swapchain.GetImageFormat(), vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eClear,
- vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare,
- vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined,
- vk::ImageLayout::ePresentSrcKHR);
-
- const vk::AttachmentReference color_attachment_ref(0, vk::ImageLayout::eColorAttachmentOptimal);
-
- const vk::SubpassDescription subpass_description({}, vk::PipelineBindPoint::eGraphics, 0,
- nullptr, 1, &color_attachment_ref, nullptr,
- nullptr, 0, nullptr);
-
- const vk::SubpassDependency dependency(
- VK_SUBPASS_EXTERNAL, 0, vk::PipelineStageFlagBits::eColorAttachmentOutput,
- vk::PipelineStageFlagBits::eColorAttachmentOutput, {},
- vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite, {});
-
- const vk::RenderPassCreateInfo renderpass_ci({}, 1, &color_attachment, 1, &subpass_description,
- 1, &dependency);
-
- const auto dev = device.GetLogical();
- renderpass = dev.createRenderPassUnique(renderpass_ci, nullptr, device.GetDispatchLoader());
+ VkAttachmentDescription color_attachment;
+ color_attachment.flags = 0;
+ color_attachment.format = swapchain.GetImageFormat();
+ color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+ VkAttachmentReference color_attachment_ref;
+ color_attachment_ref.attachment = 0;
+ color_attachment_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ VkSubpassDescription subpass_description;
+ subpass_description.flags = 0;
+ subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass_description.inputAttachmentCount = 0;
+ subpass_description.pInputAttachments = nullptr;
+ subpass_description.colorAttachmentCount = 1;
+ subpass_description.pColorAttachments = &color_attachment_ref;
+ subpass_description.pResolveAttachments = nullptr;
+ subpass_description.pDepthStencilAttachment = nullptr;
+ subpass_description.preserveAttachmentCount = 0;
+ subpass_description.pPreserveAttachments = nullptr;
+
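+ // External dependency so the acquired swapchain image is ready before color output writes to it.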
+ VkSubpassDependency dependency;
+ dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+ dependency.dstSubpass = 0;
+ dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.srcAccessMask = 0;
+ dependency.dstAccessMask =
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ dependency.dependencyFlags = 0;
+
+ VkRenderPassCreateInfo renderpass_ci;
+ renderpass_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ renderpass_ci.pNext = nullptr;
+ renderpass_ci.flags = 0;
+ renderpass_ci.attachmentCount = 1;
+ renderpass_ci.pAttachments = &color_attachment;
+ renderpass_ci.subpassCount = 1;
+ renderpass_ci.pSubpasses = &subpass_description;
+ renderpass_ci.dependencyCount = 1;
+ renderpass_ci.pDependencies = &dependency;
+
+ renderpass = device.GetLogical().CreateRenderPass(renderpass_ci);
}
void VKBlitScreen::CreateDescriptorSetLayout() {
- const std::array<vk::DescriptorSetLayoutBinding, 2> layout_bindings{
- vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eUniformBuffer, 1,
- vk::ShaderStageFlagBits::eVertex, nullptr),
- vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eCombinedImageSampler, 1,
- vk::ShaderStageFlagBits::eFragment, nullptr)};
- const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci(
- {}, static_cast<u32>(layout_bindings.size()), layout_bindings.data());
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld);
+ std::array<VkDescriptorSetLayoutBinding, 2> layout_bindings;
+ layout_bindings[0].binding = 0;
+ layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ layout_bindings[0].descriptorCount = 1;
+ layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ layout_bindings[0].pImmutableSamplers = nullptr;
+ layout_bindings[1].binding = 1;
+ layout_bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ layout_bindings[1].descriptorCount = 1;
+ layout_bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ layout_bindings[1].pImmutableSamplers = nullptr;
+
+ VkDescriptorSetLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.bindingCount = static_cast<u32>(layout_bindings.size());
+ ci.pBindings = layout_bindings.data();
+
+ descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(ci);
}
void VKBlitScreen::CreateDescriptorSets() {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
-
- descriptor_sets.resize(image_count);
- for (std::size_t i = 0; i < image_count; ++i) {
- const vk::DescriptorSetLayout layout = *descriptor_set_layout;
- const vk::DescriptorSetAllocateInfo descriptor_set_ai(*descriptor_pool, 1, &layout);
- const vk::Result result =
- dev.allocateDescriptorSets(&descriptor_set_ai, &descriptor_sets[i], dld);
- ASSERT(result == vk::Result::eSuccess);
- }
+ const std::vector layouts(image_count, *descriptor_set_layout);
+
+ VkDescriptorSetAllocateInfo ai;
+ ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ ai.pNext = nullptr;
+ ai.descriptorPool = *descriptor_pool;
+ ai.descriptorSetCount = static_cast<u32>(image_count);
+ ai.pSetLayouts = layouts.data();
+ descriptor_sets = descriptor_pool.Allocate(ai);
}
void VKBlitScreen::CreatePipelineLayout() {
- const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &descriptor_set_layout.get(), 0,
- nullptr);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- pipeline_layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+ VkPipelineLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.setLayoutCount = 1;
+ ci.pSetLayouts = descriptor_set_layout.address();
+ ci.pushConstantRangeCount = 0;
+ ci.pPushConstantRanges = nullptr;
+ pipeline_layout = device.GetLogical().CreatePipelineLayout(ci);
}
void VKBlitScreen::CreateGraphicsPipeline() {
- const std::array shader_stages = {
- vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eVertex, *vertex_shader,
- "main", nullptr),
- vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eFragment, *fragment_shader,
- "main", nullptr)};
+ std::array<VkPipelineShaderStageCreateInfo, 2> shader_stages;
+ shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shader_stages[0].pNext = nullptr;
+ shader_stages[0].flags = 0;
+ shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
+ shader_stages[0].module = *vertex_shader;
+ shader_stages[0].pName = "main";
+ shader_stages[0].pSpecializationInfo = nullptr;
+ shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shader_stages[1].pNext = nullptr;
+ shader_stages[1].flags = 0;
+ shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ shader_stages[1].module = *fragment_shader;
+ shader_stages[1].pName = "main";
+ shader_stages[1].pSpecializationInfo = nullptr;
const auto vertex_binding_description = ScreenRectVertex::GetDescription();
const auto vertex_attrs_description = ScreenRectVertex::GetAttributes();
- const vk::PipelineVertexInputStateCreateInfo vertex_input(
- {}, 1, &vertex_binding_description, static_cast<u32>(vertex_attrs_description.size()),
- vertex_attrs_description.data());
-
- const vk::PipelineInputAssemblyStateCreateInfo input_assembly(
- {}, vk::PrimitiveTopology::eTriangleStrip, false);
-
- // Set a dummy viewport, it's going to be replaced by dynamic states.
- const vk::Viewport viewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
- const vk::Rect2D scissor({0, 0}, {1, 1});
- const vk::PipelineViewportStateCreateInfo viewport_state({}, 1, &viewport, 1, &scissor);
-
- const vk::PipelineRasterizationStateCreateInfo rasterizer(
- {}, false, false, vk::PolygonMode::eFill, vk::CullModeFlagBits::eNone,
- vk::FrontFace::eClockwise, false, 0.0f, 0.0f, 0.0f, 1.0f);
-
- const vk::PipelineMultisampleStateCreateInfo multisampling({}, vk::SampleCountFlagBits::e1,
- false, 0.0f, nullptr, false, false);
-
- const vk::PipelineColorBlendAttachmentState color_blend_attachment(
- false, vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd,
- vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd,
- vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
- vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA);
-
- const vk::PipelineColorBlendStateCreateInfo color_blending(
- {}, false, vk::LogicOp::eCopy, 1, &color_blend_attachment, {0.0f, 0.0f, 0.0f, 0.0f});
-
- const std::array<vk::DynamicState, 2> dynamic_states = {vk::DynamicState::eViewport,
- vk::DynamicState::eScissor};
-
- const vk::PipelineDynamicStateCreateInfo dynamic_state(
- {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data());
-
- const vk::GraphicsPipelineCreateInfo pipeline_ci(
- {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input,
- &input_assembly, nullptr, &viewport_state, &rasterizer, &multisampling, nullptr,
- &color_blending, &dynamic_state, *pipeline_layout, *renderpass, 0, nullptr, 0);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- pipeline = dev.createGraphicsPipelineUnique({}, pipeline_ci, nullptr, dld);
+ VkPipelineVertexInputStateCreateInfo vertex_input_ci;
+ vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertex_input_ci.pNext = nullptr;
+ vertex_input_ci.flags = 0;
+ vertex_input_ci.vertexBindingDescriptionCount = 1;
+ vertex_input_ci.pVertexBindingDescriptions = &vertex_binding_description;
+ vertex_input_ci.vertexAttributeDescriptionCount = static_cast<u32>(vertex_attrs_description.size());
+ vertex_input_ci.pVertexAttributeDescriptions = vertex_attrs_description.data();
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
+ input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ input_assembly_ci.pNext = nullptr;
+ input_assembly_ci.flags = 0;
+ input_assembly_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+ input_assembly_ci.primitiveRestartEnable = VK_FALSE;
+
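+ // Viewport and scissor are dynamic states, so only the counts matter here; the pointers are ignored.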
+ VkPipelineViewportStateCreateInfo viewport_state_ci;
+ viewport_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_state_ci.pNext = nullptr;
+ viewport_state_ci.flags = 0;
+ viewport_state_ci.viewportCount = 1;
+ viewport_state_ci.pViewports = nullptr;
+ viewport_state_ci.scissorCount = 1;
+ viewport_state_ci.pScissors = nullptr;
+
+ VkPipelineRasterizationStateCreateInfo rasterization_ci;
+ rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization_ci.pNext = nullptr;
+ rasterization_ci.flags = 0;
+ rasterization_ci.depthClampEnable = VK_FALSE;
+ rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
+ rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
+ rasterization_ci.cullMode = VK_CULL_MODE_NONE;
+ rasterization_ci.frontFace = VK_FRONT_FACE_CLOCKWISE;
+ rasterization_ci.depthBiasEnable = VK_FALSE;
+ rasterization_ci.depthBiasConstantFactor = 0.0f;
+ rasterization_ci.depthBiasClamp = 0.0f;
+ rasterization_ci.depthBiasSlopeFactor = 0.0f;
+ rasterization_ci.lineWidth = 1.0f;
+
+ VkPipelineMultisampleStateCreateInfo multisampling_ci;
+ multisampling_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampling_ci.pNext = nullptr;
+ multisampling_ci.flags = 0;
+ multisampling_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+ multisampling_ci.sampleShadingEnable = VK_FALSE;
+ multisampling_ci.minSampleShading = 0.0f;
+ multisampling_ci.pSampleMask = nullptr;
+ multisampling_ci.alphaToCoverageEnable = VK_FALSE;
+ multisampling_ci.alphaToOneEnable = VK_FALSE;
+
+ VkPipelineColorBlendAttachmentState color_blend_attachment;
+ color_blend_attachment.blendEnable = VK_FALSE;
+ color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+ color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+ color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+ VkPipelineColorBlendStateCreateInfo color_blend_ci;
+ color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ color_blend_ci.flags = 0;
+ color_blend_ci.pNext = nullptr;
+ color_blend_ci.logicOpEnable = VK_FALSE;
+ color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
+ color_blend_ci.attachmentCount = 1;
+ color_blend_ci.pAttachments = &color_blend_attachment;
+ color_blend_ci.blendConstants[0] = 0.0f;
+ color_blend_ci.blendConstants[1] = 0.0f;
+ color_blend_ci.blendConstants[2] = 0.0f;
+ color_blend_ci.blendConstants[3] = 0.0f;
+
+ static constexpr std::array dynamic_states = {VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR};
+ VkPipelineDynamicStateCreateInfo dynamic_state_ci;
+ dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic_state_ci.pNext = nullptr;
+ dynamic_state_ci.flags = 0;
+ dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
+ dynamic_state_ci.pDynamicStates = dynamic_states.data();
+
+ VkGraphicsPipelineCreateInfo pipeline_ci;
+ pipeline_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipeline_ci.pNext = nullptr;
+ pipeline_ci.flags = 0;
+ pipeline_ci.stageCount = static_cast<u32>(shader_stages.size());
+ pipeline_ci.pStages = shader_stages.data();
+ pipeline_ci.pVertexInputState = &vertex_input_ci;
+ pipeline_ci.pInputAssemblyState = &input_assembly_ci;
+ pipeline_ci.pTessellationState = nullptr;
+ pipeline_ci.pViewportState = &viewport_state_ci;
+ pipeline_ci.pRasterizationState = &rasterization_ci;
+ pipeline_ci.pMultisampleState = &multisampling_ci;
+ pipeline_ci.pDepthStencilState = nullptr;
+ pipeline_ci.pColorBlendState = &color_blend_ci;
+ pipeline_ci.pDynamicState = &dynamic_state_ci;
+ pipeline_ci.layout = *pipeline_layout;
+ pipeline_ci.renderPass = *renderpass;
+ pipeline_ci.subpass = 0;
+ pipeline_ci.basePipelineHandle = VK_NULL_HANDLE;
+ pipeline_ci.basePipelineIndex = 0;
+
+ pipeline = device.GetLogical().CreateGraphicsPipeline(pipeline_ci);
}
void VKBlitScreen::CreateSampler() {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- const vk::SamplerCreateInfo sampler_ci(
- {}, vk::Filter::eLinear, vk::Filter::eLinear, vk::SamplerMipmapMode::eLinear,
- vk::SamplerAddressMode::eClampToBorder, vk::SamplerAddressMode::eClampToBorder,
- vk::SamplerAddressMode::eClampToBorder, 0.0f, false, 0.0f, false, vk::CompareOp::eNever,
- 0.0f, 0.0f, vk::BorderColor::eFloatOpaqueBlack, false);
- sampler = dev.createSamplerUnique(sampler_ci, nullptr, dld);
+ VkSamplerCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.magFilter = VK_FILTER_LINEAR;
+ ci.minFilter = VK_FILTER_LINEAR;
+ ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ ci.mipLodBias = 0.0f;
+ ci.anisotropyEnable = VK_FALSE;
+ ci.maxAnisotropy = 0.0f;
+ ci.compareEnable = VK_FALSE;
+ ci.compareOp = VK_COMPARE_OP_NEVER;
+ ci.minLod = 0.0f;
+ ci.maxLod = 0.0f;
+ ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
+ ci.unnormalizedCoordinates = VK_FALSE;
+
+ sampler = device.GetLogical().CreateSampler(ci);
}
void VKBlitScreen::CreateFramebuffers() {
- const vk::Extent2D size{swapchain.GetSize()};
- framebuffers.clear();
+ const VkExtent2D size{swapchain.GetSize()};
framebuffers.resize(image_count);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
+ VkFramebufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.renderPass = *renderpass;
+ ci.attachmentCount = 1;
+ ci.width = size.width;
+ ci.height = size.height;
+ ci.layers = 1;
for (std::size_t i = 0; i < image_count; ++i) {
- const vk::ImageView image_view{swapchain.GetImageViewIndex(i)};
- const vk::FramebufferCreateInfo framebuffer_ci({}, *renderpass, 1, &image_view, size.width,
- size.height, 1);
- framebuffers[i] = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
+ const VkImageView image_view{swapchain.GetImageViewIndex(i)};
+ ci.pAttachments = &image_view;
+ framebuffers[i] = device.GetLogical().CreateFramebuffer(ci);
}
}
@@ -507,54 +678,86 @@ void VKBlitScreen::ReleaseRawImages() {
}
void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
-
- const vk::BufferCreateInfo buffer_ci({}, CalculateBufferSize(framebuffer),
- vk::BufferUsageFlagBits::eTransferSrc |
- vk::BufferUsageFlagBits::eVertexBuffer |
- vk::BufferUsageFlagBits::eUniformBuffer,
- vk::SharingMode::eExclusive, 0, nullptr);
- buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);
- buffer_commit = memory_manager.Commit(*buffer, true);
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = CalculateBufferSize(framebuffer);
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+
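+ // A single buffer backs the uniform block, the vertex quad and the raw pixel upload.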
+ buffer = device.GetLogical().CreateBuffer(ci);
+ buffer_commit = memory_manager.Commit(buffer, true);
}
void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) {
raw_images.resize(image_count);
raw_buffer_commits.resize(image_count);
- const auto format = GetFormat(framebuffer);
+ VkImageCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.imageType = VK_IMAGE_TYPE_2D;
+ ci.format = GetFormat(framebuffer);
+ ci.extent.width = framebuffer.width;
+ ci.extent.height = framebuffer.height;
+ ci.extent.depth = 1;
+ ci.mipLevels = 1;
+ ci.arrayLayers = 1;
+ ci.samples = VK_SAMPLE_COUNT_1_BIT;
+ ci.tiling = VK_IMAGE_TILING_LINEAR;
+ ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
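+ // Every raw image shares the same create info; only the backing memory commit differs.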
for (std::size_t i = 0; i < image_count; ++i) {
- const vk::ImageCreateInfo image_ci(
- {}, vk::ImageType::e2D, format, {framebuffer.width, framebuffer.height, 1}, 1, 1,
- vk::SampleCountFlagBits::e1, vk::ImageTiling::eOptimal,
- vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled,
- vk::SharingMode::eExclusive, 0, nullptr, vk::ImageLayout::eUndefined);
-
- raw_images[i] =
- std::make_unique<VKImage>(device, scheduler, image_ci, vk::ImageAspectFlagBits::eColor);
+ raw_images[i] = std::make_unique<VKImage>(device, scheduler, ci, VK_IMAGE_ASPECT_COLOR_BIT);
raw_buffer_commits[i] = memory_manager.Commit(raw_images[i]->GetHandle(), false);
}
}
-void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const {
- const vk::DescriptorSet descriptor_set = descriptor_sets[image_index];
-
- const vk::DescriptorBufferInfo buffer_info(*buffer, offsetof(BufferData, uniform),
- sizeof(BufferData::uniform));
- const vk::WriteDescriptorSet ubo_write(descriptor_set, 0, 0, 1,
- vk::DescriptorType::eUniformBuffer, nullptr,
- &buffer_info, nullptr);
-
- const vk::DescriptorImageInfo image_info(*sampler, image_view,
- vk::ImageLayout::eShaderReadOnlyOptimal);
- const vk::WriteDescriptorSet sampler_write(descriptor_set, 1, 0, 1,
- vk::DescriptorType::eCombinedImageSampler,
- &image_info, nullptr, nullptr);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- dev.updateDescriptorSets({ubo_write, sampler_write}, {}, dld);
+void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const {
+ VkDescriptorBufferInfo buffer_info;
+ buffer_info.buffer = *buffer;
+ buffer_info.offset = offsetof(BufferData, uniform);
+ buffer_info.range = sizeof(BufferData::uniform);
+
+ VkWriteDescriptorSet ubo_write;
+ ubo_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ ubo_write.pNext = nullptr;
+ ubo_write.dstSet = descriptor_sets[image_index];
+ ubo_write.dstBinding = 0;
+ ubo_write.dstArrayElement = 0;
+ ubo_write.descriptorCount = 1;
+ ubo_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ ubo_write.pImageInfo = nullptr;
+ ubo_write.pBufferInfo = &buffer_info;
+ ubo_write.pTexelBufferView = nullptr;
+
+ VkDescriptorImageInfo image_info;
+ image_info.sampler = *sampler;
+ image_info.imageView = image_view;
+ image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet sampler_write;
+ sampler_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ sampler_write.pNext = nullptr;
+ sampler_write.dstSet = descriptor_sets[image_index];
+ sampler_write.dstBinding = 1;
+ sampler_write.dstArrayElement = 0;
+ sampler_write.descriptorCount = 1;
+ sampler_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ sampler_write.pImageInfo = &image_info;
+ sampler_write.pBufferInfo = nullptr;
+ sampler_write.pTexelBufferView = nullptr;
+
+ device.GetLogical().UpdateDescriptorSets(std::array{ubo_write, sampler_write}, {});
}
void VKBlitScreen::SetUniformData(BufferData& data,
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index ea680b3f5..5eb544aea 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -8,9 +8,9 @@
#include <memory>
#include <tuple>
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
class System;
@@ -49,8 +49,8 @@ public:
void Recreate();
- std::tuple<VKFence&, vk::Semaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
- bool use_accelerated);
+ std::tuple<VKFence&, VkSemaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
+ bool use_accelerated);
private:
struct BufferData;
@@ -74,7 +74,7 @@ private:
void CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer);
void CreateRawImages(const Tegra::FramebufferConfig& framebuffer);
- void UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const;
+ void UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const;
void SetUniformData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
void SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
@@ -93,23 +93,23 @@ private:
const std::size_t image_count;
const VKScreenInfo& screen_info;
- UniqueShaderModule vertex_shader;
- UniqueShaderModule fragment_shader;
- UniqueDescriptorPool descriptor_pool;
- UniqueDescriptorSetLayout descriptor_set_layout;
- UniquePipelineLayout pipeline_layout;
- UniquePipeline pipeline;
- UniqueRenderPass renderpass;
- std::vector<UniqueFramebuffer> framebuffers;
- std::vector<vk::DescriptorSet> descriptor_sets;
- UniqueSampler sampler;
-
- UniqueBuffer buffer;
+ vk::ShaderModule vertex_shader;
+ vk::ShaderModule fragment_shader;
+ vk::DescriptorPool descriptor_pool;
+ vk::DescriptorSetLayout descriptor_set_layout;
+ vk::PipelineLayout pipeline_layout;
+ vk::Pipeline pipeline;
+ vk::RenderPass renderpass;
+ std::vector<vk::Framebuffer> framebuffers;
+ vk::DescriptorSets descriptor_sets;
+ vk::Sampler sampler;
+
+ vk::Buffer buffer;
VKMemoryCommit buffer_commit;
std::vector<std::unique_ptr<VKFenceWatch>> watches;
- std::vector<UniqueSemaphore> semaphores;
+ std::vector<vk::Semaphore> semaphores;
std::vector<std::unique_ptr<VKImage>> raw_images;
std::vector<VKMemoryCommit> raw_buffer_commits;
u32 raw_width = 0;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 1ba544943..0d167afbd 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -11,48 +11,50 @@
#include "common/assert.h"
#include "common/bit_util.h"
#include "core/core.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
namespace {
-const auto BufferUsage =
- vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eIndexBuffer |
- vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer;
+constexpr VkBufferUsageFlags BUFFER_USAGE =
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
-const auto UploadPipelineStage =
- vk::PipelineStageFlagBits::eTransfer | vk::PipelineStageFlagBits::eVertexInput |
- vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader |
- vk::PipelineStageFlagBits::eComputeShader;
+constexpr VkPipelineStageFlags UPLOAD_PIPELINE_STAGE =
+ VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-const auto UploadAccessBarriers =
- vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eShaderRead |
- vk::AccessFlagBits::eUniformRead | vk::AccessFlagBits::eVertexAttributeRead |
- vk::AccessFlagBits::eIndexRead;
+constexpr VkAccessFlags UPLOAD_ACCESS_BARRIERS =
+ VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT;
-auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
- return std::make_unique<VKStreamBuffer>(device, scheduler, BufferUsage);
+std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
+ return std::make_unique<VKStreamBuffer>(device, scheduler, BUFFER_USAGE);
}
} // Anonymous namespace
CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
- CacheAddr cache_addr, std::size_t size)
- : VideoCommon::BufferBlock{cache_addr, size} {
- const vk::BufferCreateInfo buffer_ci({}, static_cast<vk::DeviceSize>(size),
- BufferUsage | vk::BufferUsageFlagBits::eTransferSrc |
- vk::BufferUsageFlagBits::eTransferDst,
- vk::SharingMode::eExclusive, 0, nullptr);
-
- const auto& dld{device.GetDispatchLoader()};
- const auto dev{device.GetLogical()};
- buffer.handle = dev.createBufferUnique(buffer_ci, nullptr, dld);
- buffer.commit = memory_manager.Commit(*buffer.handle, false);
+ VAddr cpu_addr, std::size_t size)
+ : VideoCommon::BufferBlock{cpu_addr, size} {
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = static_cast<VkDeviceSize>(size);
+ ci.usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+
+ buffer.handle = device.GetLogical().CreateBuffer(ci);
+ buffer.commit = memory_manager.Commit(buffer.handle, false);
}
CachedBufferBlock::~CachedBufferBlock() = default;
@@ -60,30 +62,30 @@ CachedBufferBlock::~CachedBufferBlock() = default;
VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool)
- : VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer>{rasterizer, system,
- CreateStreamBuffer(device,
- scheduler)},
+ : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system,
+ CreateStreamBuffer(device,
+ scheduler)},
device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{
staging_pool} {}
VKBufferCache::~VKBufferCache() = default;
-Buffer VKBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
- return std::make_shared<CachedBufferBlock>(device, memory_manager, cache_addr, size);
+Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
+ return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
}
-const vk::Buffer* VKBufferCache::ToHandle(const Buffer& buffer) {
+const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
return buffer->GetHandle();
}
-const vk::Buffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
+const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
size = std::max(size, std::size_t(4));
const auto& empty = staging_pool.GetUnusedBuffer(size, false);
scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf, auto& dld) {
- cmdbuf.fillBuffer(buffer, 0, size, 0, dld);
+ scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
+ cmdbuf.FillBuffer(buffer, 0, size, 0);
});
- return &*empty.handle;
+ return empty.handle.address();
}
void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
@@ -93,14 +95,21 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
- size](auto cmdbuf, auto& dld) {
- cmdbuf.copyBuffer(staging, buffer, {{0, offset, size}}, dld);
- cmdbuf.pipelineBarrier(
- vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {},
- {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers,
- VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer,
- offset, size)},
- {}, dld);
+ size](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
+
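+ // Make the transfer write visible to every stage that may read the uploaded range.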
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = buffer;
+ barrier.offset = offset;
+ barrier.size = size;
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
+ barrier, {});
});
}
@@ -109,16 +118,23 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
const auto& staging = staging_pool.GetUnusedBuffer(size, true);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
- size](auto cmdbuf, auto& dld) {
- cmdbuf.pipelineBarrier(
- vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader |
- vk::PipelineStageFlagBits::eComputeShader,
- vk::PipelineStageFlagBits::eTransfer, {}, {},
- {vk::BufferMemoryBarrier(vk::AccessFlagBits::eShaderWrite,
- vk::AccessFlagBits::eTransferRead, VK_QUEUE_FAMILY_IGNORED,
- VK_QUEUE_FAMILY_IGNORED, buffer, offset, size)},
- {}, dld);
- cmdbuf.copyBuffer(buffer, staging, {{offset, 0, size}}, dld);
+ size](vk::CommandBuffer cmdbuf) {
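+ // Flush shader writes before the transfer stage reads the buffer back to the staging memory.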
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = buffer;
+ barrier.offset = offset;
+ barrier.size = size;
+
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, 0, {}, barrier, {});
+ cmdbuf.CopyBuffer(buffer, staging, VkBufferCopy{offset, 0, size});
});
scheduler.Finish();
@@ -129,17 +145,30 @@ void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t
std::size_t dst_offset, std::size_t size) {
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
- dst_offset, size](auto cmdbuf, auto& dld) {
- cmdbuf.copyBuffer(src_buffer, dst_buffer, {{src_offset, dst_offset, size}}, dld);
- cmdbuf.pipelineBarrier(
- vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {},
- {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferRead,
- vk::AccessFlagBits::eShaderWrite, VK_QUEUE_FAMILY_IGNORED,
- VK_QUEUE_FAMILY_IGNORED, src_buffer, src_offset, size),
- vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers,
- VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, dst_buffer,
- dst_offset, size)},
- {}, dld);
+ dst_offset, size](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
+
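+ // barriers[0] orders the transfer read before any future shader write to the source;
+ // barriers[1] makes the transfer write visible to every stage that may read the destination.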
+ std::array<VkBufferMemoryBarrier, 2> barriers;
+ barriers[0].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barriers[0].pNext = nullptr;
+ barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ barriers[0].dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+ barriers[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barriers[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barriers[0].buffer = src_buffer;
+ barriers[0].offset = src_offset;
+ barriers[0].size = size;
+ barriers[1].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barriers[1].pNext = nullptr;
+ barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barriers[1].dstAccessMask = UPLOAD_ACCESS_BARRIERS;
+ barriers[1].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barriers[1].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barriers[1].buffer = dst_buffer;
+ barriers[1].offset = dst_offset;
+ barriers[1].size = size;
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
+ barriers, {});
});
}
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3f38eed0c..d3c23da98 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -11,11 +11,11 @@
#include "common/common_types.h"
#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
class System;
@@ -30,11 +30,11 @@ class VKScheduler;
class CachedBufferBlock final : public VideoCommon::BufferBlock {
public:
explicit CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
- CacheAddr cache_addr, std::size_t size);
+ VAddr cpu_addr, std::size_t size);
~CachedBufferBlock();
- const vk::Buffer* GetHandle() const {
- return &*buffer.handle;
+ const VkBuffer* GetHandle() const {
+ return buffer.handle.address();
}
private:
@@ -43,21 +43,21 @@ private:
using Buffer = std::shared_ptr<CachedBufferBlock>;
-class VKBufferCache final : public VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer> {
+class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
public:
explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
const VKDevice& device, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
~VKBufferCache();
- const vk::Buffer* GetEmptyBuffer(std::size_t size) override;
+ const VkBuffer* GetEmptyBuffer(std::size_t size) override;
protected:
void WriteBarrier() override {}
- Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+ Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
- const vk::Buffer* ToHandle(const Buffer& buffer) override;
+ const VkBuffer* ToHandle(const Buffer& buffer) override;
void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
const u8* data) override;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7bdda3d79..9d92305f4 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -10,13 +10,13 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -114,6 +114,35 @@ constexpr u8 quad_array[] = {
0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00,
0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
+ VkDescriptorSetLayoutBinding binding;
+ binding.binding = 0;
+ binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ binding.descriptorCount = 1;
+ binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+ binding.pImmutableSamplers = nullptr;
+ return binding;
+}
+
+VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEntry() {
+ VkDescriptorUpdateTemplateEntryKHR entry;
+ entry.dstBinding = 0;
+ entry.dstArrayElement = 0;
+ entry.descriptorCount = 1;
+ entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ entry.offset = 0;
+ entry.stride = sizeof(DescriptorUpdateEntry);
+ return entry;
+}
+
+VkPushConstantRange BuildQuadArrayPassPushConstantRange() {
+ VkPushConstantRange range;
+ range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+ range.offset = 0;
+ range.size = sizeof(u32);
+ return range;
+}
+
// Uint8 SPIR-V module. Generated from the "shaders/" directory.
constexpr u8 uint8_pass[] = {
0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x2f, 0x00, 0x00, 0x00,
@@ -191,53 +220,111 @@ constexpr u8 uint8_pass[] = {
0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00,
0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
+std::array<VkDescriptorSetLayoutBinding, 2> BuildUint8PassDescriptorSetBindings() {
+ std::array<VkDescriptorSetLayoutBinding, 2> bindings;
+ bindings[0].binding = 0;
+ bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ bindings[0].descriptorCount = 1;
+ bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+ bindings[0].pImmutableSamplers = nullptr;
+ bindings[1].binding = 1;
+ bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ bindings[1].descriptorCount = 1;
+ bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+ bindings[1].pImmutableSamplers = nullptr;
+ return bindings;
+}
+
+VkDescriptorUpdateTemplateEntryKHR BuildUint8PassDescriptorUpdateTemplateEntry() {
+ VkDescriptorUpdateTemplateEntryKHR entry;
+ entry.dstBinding = 0;
+ entry.dstArrayElement = 0;
+ entry.descriptorCount = 2;
+ entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ entry.offset = 0;
+ entry.stride = sizeof(DescriptorUpdateEntry);
+ return entry;
+}
+
} // Anonymous namespace
VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
- const std::vector<vk::DescriptorUpdateTemplateEntry>& templates,
- const std::vector<vk::PushConstantRange> push_constants,
- std::size_t code_size, const u8* code) {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
-
- const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci(
- {}, static_cast<u32>(bindings.size()), bindings.data());
- descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld);
-
- const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout,
- static_cast<u32>(push_constants.size()),
- push_constants.data());
- layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
+ vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+ vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
+ const u8* code) {
+ VkDescriptorSetLayoutCreateInfo descriptor_layout_ci;
+ descriptor_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ descriptor_layout_ci.pNext = nullptr;
+ descriptor_layout_ci.flags = 0;
+ descriptor_layout_ci.bindingCount = bindings.size();
+ descriptor_layout_ci.pBindings = bindings.data();
+ descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(descriptor_layout_ci);
+
+ VkPipelineLayoutCreateInfo pipeline_layout_ci;
+ pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ pipeline_layout_ci.pNext = nullptr;
+ pipeline_layout_ci.flags = 0;
+ pipeline_layout_ci.setLayoutCount = 1;
+ pipeline_layout_ci.pSetLayouts = descriptor_set_layout.address();
+ pipeline_layout_ci.pushConstantRangeCount = push_constants.size();
+ pipeline_layout_ci.pPushConstantRanges = push_constants.data();
+ layout = device.GetLogical().CreatePipelineLayout(pipeline_layout_ci);
if (!templates.empty()) {
- const vk::DescriptorUpdateTemplateCreateInfo template_ci(
- {}, static_cast<u32>(templates.size()), templates.data(),
- vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
- vk::PipelineBindPoint::eGraphics, *layout, 0);
- descriptor_template = dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+ VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
+ template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
+ template_ci.pNext = nullptr;
+ template_ci.flags = 0;
+ template_ci.descriptorUpdateEntryCount = templates.size();
+ template_ci.pDescriptorUpdateEntries = templates.data();
+ template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+ template_ci.descriptorSetLayout = *descriptor_set_layout;
+ template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
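+ // pipelineBindPoint is ignored for DESCRIPTOR_SET templates; it only applies to push descriptors.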
+ template_ci.pipelineLayout = *layout;
+ template_ci.set = 0;
+ descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR(template_ci);
descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
}
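+ // Copy the SPIR-V blob into u32 storage so pCode meets its alignment requirement.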
auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
std::memcpy(code_copy.get(), code, code_size);
- const vk::ShaderModuleCreateInfo module_ci({}, code_size, code_copy.get());
- module = dev.createShaderModuleUnique(module_ci, nullptr, dld);
- const vk::PipelineShaderStageCreateInfo stage_ci({}, vk::ShaderStageFlagBits::eCompute, *module,
- "main", nullptr);
+ VkShaderModuleCreateInfo module_ci;
+ module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ module_ci.pNext = nullptr;
+ module_ci.flags = 0;
+ module_ci.codeSize = code_size;
+ module_ci.pCode = code_copy.get();
+ module = device.GetLogical().CreateShaderModule(module_ci);
+
+ VkComputePipelineCreateInfo pipeline_ci;
+ pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+ pipeline_ci.pNext = nullptr;
+ pipeline_ci.flags = 0;
+ pipeline_ci.layout = *layout;
+ pipeline_ci.basePipelineHandle = VK_NULL_HANDLE;
+ pipeline_ci.basePipelineIndex = 0;
- const vk::ComputePipelineCreateInfo pipeline_ci({}, stage_ci, *layout, nullptr, 0);
- pipeline = dev.createComputePipelineUnique(nullptr, pipeline_ci, nullptr, dld);
+ VkPipelineShaderStageCreateInfo& stage_ci = pipeline_ci.stage;
+ stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage_ci.pNext = nullptr;
+ stage_ci.flags = 0;
+ stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+ stage_ci.module = *module;
+ stage_ci.pName = "main";
+ stage_ci.pSpecializationInfo = nullptr;
+
+ pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci);
}
VKComputePass::~VKComputePass() = default;
-vk::DescriptorSet VKComputePass::CommitDescriptorSet(
- VKUpdateDescriptorQueue& update_descriptor_queue, VKFence& fence) {
+VkDescriptorSet VKComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
+ VKFence& fence) {
if (!descriptor_template) {
- return {};
+ return nullptr;
}
const auto set = descriptor_allocator->Commit(fence);
update_descriptor_queue.Send(*descriptor_template, set);
@@ -248,25 +335,21 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
VKDescriptorPool& descriptor_pool,
VKStagingBufferPool& staging_buffer_pool,
VKUpdateDescriptorQueue& update_descriptor_queue)
- : VKComputePass(device, descriptor_pool,
- {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1,
- vk::ShaderStageFlagBits::eCompute, nullptr)},
- {vk::DescriptorUpdateTemplateEntry(0, 0, 1, vk::DescriptorType::eStorageBuffer,
- 0, sizeof(DescriptorUpdateEntry))},
- {vk::PushConstantRange(vk::ShaderStageFlagBits::eCompute, 0, sizeof(u32))},
- std::size(quad_array), quad_array),
+ : VKComputePass(device, descriptor_pool, BuildQuadArrayPassDescriptorSetLayoutBinding(),
+ BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
+ BuildQuadArrayPassPushConstantRange(), std::size(quad_array), quad_array),
scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
update_descriptor_queue{update_descriptor_queue} {}
QuadArrayPass::~QuadArrayPass() = default;
-std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
+std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
const u32 num_triangle_vertices = num_vertices * 6 / 4;
const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
update_descriptor_queue.Acquire();
- update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size);
+ update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
scheduler.RequestOutsideRenderPassOperationContext();
@@ -274,66 +357,72 @@ std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_ver
ASSERT(num_vertices % 4 == 0);
const u32 num_quads = num_vertices / 4;
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads,
- first, set](auto cmdbuf, auto& dld) {
+ first, set](vk::CommandBuffer cmdbuf) {
constexpr u32 dispatch_size = 1024;
- cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld);
- cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld);
- cmdbuf.pushConstants(layout, vk::ShaderStageFlagBits::eCompute, 0, sizeof(first), &first,
- dld);
- cmdbuf.dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1, dld);
-
- const vk::BufferMemoryBarrier barrier(
- vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead,
- VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0,
- static_cast<vk::DeviceSize>(num_quads) * 6 * sizeof(u32));
- cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader,
- vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+ cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(first), &first);
+ cmdbuf.Dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1);
+
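+ // Make the compute shader writes visible to the vertex input stage before the draw consumes them.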
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = buffer;
+ barrier.offset = 0;
+ barrier.size = static_cast<VkDeviceSize>(num_quads) * 6 * sizeof(u32);
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
});
- return {*buffer.handle, 0};
+ return {buffer.handle.address(), 0};
}
Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool,
VKUpdateDescriptorQueue& update_descriptor_queue)
- : VKComputePass(device, descriptor_pool,
- {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1,
- vk::ShaderStageFlagBits::eCompute, nullptr),
- vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eStorageBuffer, 1,
- vk::ShaderStageFlagBits::eCompute, nullptr)},
- {vk::DescriptorUpdateTemplateEntry(0, 0, 2, vk::DescriptorType::eStorageBuffer,
- 0, sizeof(DescriptorUpdateEntry))},
- {}, std::size(uint8_pass), uint8_pass),
+ : VKComputePass(device, descriptor_pool, BuildUint8PassDescriptorSetBindings(),
+ BuildUint8PassDescriptorUpdateTemplateEntry(), {}, std::size(uint8_pass),
+ uint8_pass),
scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
update_descriptor_queue{update_descriptor_queue} {}
Uint8Pass::~Uint8Pass() = default;
-std::pair<const vk::Buffer*, u64> Uint8Pass::Assemble(u32 num_vertices, vk::Buffer src_buffer,
- u64 src_offset) {
+std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
+ u64 src_offset) {
const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
- update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size);
+ update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
- num_vertices](auto cmdbuf, auto& dld) {
+ num_vertices](vk::CommandBuffer cmdbuf) {
constexpr u32 dispatch_size = 1024;
- cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld);
- cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld);
- cmdbuf.dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1, dld);
-
- const vk::BufferMemoryBarrier barrier(
- vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead,
- VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0,
- static_cast<vk::DeviceSize>(num_vertices) * sizeof(u16));
- cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader,
- vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld);
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
+ cmdbuf.Dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1);
+
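+ // Make the compute writes visible before the vertex input stage reads the converted u16 indices.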
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = buffer;
+ barrier.offset = 0;
+ barrier.size = static_cast<VkDeviceSize>(num_vertices * sizeof(u16));
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
});
- return {&*buffer.handle, 0};
+ return {buffer.handle.address(), 0};
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 7057eb837..c62516bff 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -8,8 +8,8 @@
#include <utility>
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -22,24 +22,24 @@ class VKUpdateDescriptorQueue;
class VKComputePass {
public:
explicit VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
- const std::vector<vk::DescriptorUpdateTemplateEntry>& templates,
- const std::vector<vk::PushConstantRange> push_constants,
- std::size_t code_size, const u8* code);
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
+ vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
+ vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
+ const u8* code);
~VKComputePass();
protected:
- vk::DescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
- VKFence& fence);
+ VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
+ VKFence& fence);
- UniqueDescriptorUpdateTemplate descriptor_template;
- UniquePipelineLayout layout;
- UniquePipeline pipeline;
+ vk::DescriptorUpdateTemplateKHR descriptor_template;
+ vk::PipelineLayout layout;
+ vk::Pipeline pipeline;
private:
- UniqueDescriptorSetLayout descriptor_set_layout;
+ vk::DescriptorSetLayout descriptor_set_layout;
std::optional<DescriptorAllocator> descriptor_allocator;
- UniqueShaderModule module;
+ vk::ShaderModule module;
};
class QuadArrayPass final : public VKComputePass {
@@ -50,7 +50,7 @@ public:
VKUpdateDescriptorQueue& update_descriptor_queue);
~QuadArrayPass();
- std::pair<const vk::Buffer&, vk::DeviceSize> Assemble(u32 num_vertices, u32 first);
+ std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
private:
VKScheduler& scheduler;
@@ -65,8 +65,7 @@ public:
VKUpdateDescriptorQueue& update_descriptor_queue);
~Uint8Pass();
- std::pair<const vk::Buffer*, u64> Assemble(u32 num_vertices, vk::Buffer src_buffer,
- u64 src_offset);
+ std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
private:
VKScheduler& scheduler;
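The new signatures accept a single handle, a braced list, or {} at the call sites above, so vk::Span has to be implicitly constructible from all three. A minimal sketch of the surface such a type needs (an illustration only, not the wrapper's actual definition):

    #include <cstddef>
    #include <initializer_list>

    template <typename T>
    class Span {
    public:
        constexpr Span() noexcept = default; // from {}
        // From a single element; the span borrows the caller's storage, so it is
        // only valid for the duration of the call it is passed to.
        constexpr Span(const T& value) noexcept : ptr{&value}, num{1} {}
        // From a braced list; the same borrowed-lifetime caveat applies.
        constexpr Span(std::initializer_list<T> list) noexcept
            : ptr{list.begin()}, num{list.size()} {}

        constexpr const T* data() const noexcept { return ptr; }
        constexpr std::size_t size() const noexcept { return num; }

    private:
        const T* ptr = nullptr;
        std::size_t num = 0;
    };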
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 60f57d83e..23beafa4f 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -5,7 +5,6 @@
#include <memory>
#include <vector>
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
@@ -14,6 +13,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -30,7 +30,7 @@ VKComputePipeline::VKComputePipeline(const VKDevice& device, VKScheduler& schedu
VKComputePipeline::~VKComputePipeline() = default;
-vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
+VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
if (!descriptor_template) {
return {};
}
@@ -39,74 +39,109 @@ vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
return set;
}
-UniqueDescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
- std::vector<vk::DescriptorSetLayoutBinding> bindings;
+vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
+ std::vector<VkDescriptorSetLayoutBinding> bindings;
u32 binding = 0;
- const auto AddBindings = [&](vk::DescriptorType descriptor_type, std::size_t num_entries) {
+ const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
// TODO(Rodrigo): Maybe make individual bindings here?
for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
- bindings.emplace_back(binding++, descriptor_type, 1, vk::ShaderStageFlagBits::eCompute,
- nullptr);
+ VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
+ entry.binding = binding++;
+ entry.descriptorType = descriptor_type;
+ entry.descriptorCount = 1;
+ entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
+ entry.pImmutableSamplers = nullptr;
}
};
- AddBindings(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size());
- AddBindings(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size());
- AddBindings(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size());
- AddBindings(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size());
- AddBindings(vk::DescriptorType::eStorageImage, entries.images.size());
-
- const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci(
- {}, static_cast<u32>(bindings.size()), bindings.data());
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld);
+ add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
+ add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
+ add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size());
+ add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
+ add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
+
+ VkDescriptorSetLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.bindingCount = static_cast<u32>(bindings.size());
+ ci.pBindings = bindings.data();
+ return device.GetLogical().CreateDescriptorSetLayout(ci);
}
-UniquePipelineLayout VKComputePipeline::CreatePipelineLayout() const {
- const vk::PipelineLayoutCreateInfo layout_ci({}, 1, &*descriptor_set_layout, 0, nullptr);
- const auto dev = device.GetLogical();
- return dev.createPipelineLayoutUnique(layout_ci, nullptr, device.GetDispatchLoader());
+vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
+ VkPipelineLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.setLayoutCount = 1;
+ ci.pSetLayouts = descriptor_set_layout.address();
+ ci.pushConstantRangeCount = 0;
+ ci.pPushConstantRanges = nullptr;
+ return device.GetLogical().CreatePipelineLayout(ci);
}
-UniqueDescriptorUpdateTemplate VKComputePipeline::CreateDescriptorUpdateTemplate() const {
- std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
+vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
+ std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
u32 binding = 0;
u32 offset = 0;
FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
if (template_entries.empty()) {
// If the shader doesn't use descriptor sets, skip template creation.
- return UniqueDescriptorUpdateTemplate{};
+ return {};
}
- const vk::DescriptorUpdateTemplateCreateInfo template_ci(
- {}, static_cast<u32>(template_entries.size()), template_entries.data(),
- vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
- vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+ VkDescriptorUpdateTemplateCreateInfoKHR ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
+ ci.pDescriptorUpdateEntries = template_entries.data();
+ ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+ ci.descriptorSetLayout = *descriptor_set_layout;
+ ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ ci.pipelineLayout = *layout;
+ ci.set = DESCRIPTOR_SET;
+ return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
}
-UniqueShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
- const vk::ShaderModuleCreateInfo module_ci({}, code.size() * sizeof(u32), code.data());
- const auto dev = device.GetLogical();
- return dev.createShaderModuleUnique(module_ci, nullptr, device.GetDispatchLoader());
+vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
+ VkShaderModuleCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.codeSize = code.size() * sizeof(u32);
+ ci.pCode = code.data();
+ return device.GetLogical().CreateShaderModule(ci);
}
-UniquePipeline VKComputePipeline::CreatePipeline() const {
- vk::PipelineShaderStageCreateInfo shader_stage_ci({}, vk::ShaderStageFlagBits::eCompute,
- *shader_module, "main", nullptr);
- vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+vk::Pipeline VKComputePipeline::CreatePipeline() const {
+ VkComputePipelineCreateInfo ci;
+ VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
+ stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage_ci.pNext = nullptr;
+ stage_ci.flags = 0;
+ stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+ stage_ci.module = *shader_module;
+ stage_ci.pName = "main";
+ stage_ci.pSpecializationInfo = nullptr;
+
+ VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+ subgroup_size_ci.sType =
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
+ subgroup_size_ci.pNext = nullptr;
subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
- if (entries.uses_warps && device.IsGuestWarpSizeSupported(vk::ShaderStageFlagBits::eCompute)) {
- shader_stage_ci.pNext = &subgroup_size_ci;
+
+ if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
+ stage_ci.pNext = &subgroup_size_ci;
}
- const vk::ComputePipelineCreateInfo create_info({}, shader_stage_ci, *layout, {}, 0);
- const auto dev = device.GetLogical();
- return dev.createComputePipelineUnique({}, create_info, nullptr, device.GetDispatchLoader());
+ ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.layout = *layout;
+ ci.basePipelineHandle = nullptr;
+ ci.basePipelineIndex = 0;
+ return device.GetLogical().CreateComputePipeline(ci);
}
} // namespace Vulkan
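A compute pipeline built this way is consumed through the same vk::CommandBuffer interface used by the compute passes earlier in this patch. A sketch of a dispatch, assuming the scheduler plumbing from the surrounding code (the helper itself is hypothetical):

    // Binds and dispatches a VKComputePipeline; DESCRIPTOR_SET and the u32 /
    // vk::CommandBuffer types are the ones already used by this patch.
    void DispatchCompute(VKComputePipeline& pipeline, vk::CommandBuffer cmdbuf, u32 groups_x,
                         u32 groups_y, u32 groups_z) {
        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.GetHandle());
        // CommitDescriptorSet returns a null handle when the shader uses no descriptors
        if (const VkDescriptorSet set = pipeline.CommitDescriptorSet(); set != nullptr) {
            cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.GetLayout(),
                                      DESCRIPTOR_SET, set, {});
        }
        cmdbuf.Dispatch(groups_x, groups_y, groups_z);
    }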
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 22235c6c9..33b9af29e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -7,9 +7,9 @@
#include <memory>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -25,42 +25,42 @@ public:
const SPIRVShader& shader);
~VKComputePipeline();
- vk::DescriptorSet CommitDescriptorSet();
+ VkDescriptorSet CommitDescriptorSet();
- vk::Pipeline GetHandle() const {
+ VkPipeline GetHandle() const {
return *pipeline;
}
- vk::PipelineLayout GetLayout() const {
+ VkPipelineLayout GetLayout() const {
return *layout;
}
- const ShaderEntries& GetEntries() {
+ const ShaderEntries& GetEntries() const {
return entries;
}
private:
- UniqueDescriptorSetLayout CreateDescriptorSetLayout() const;
+ vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
- UniquePipelineLayout CreatePipelineLayout() const;
+ vk::PipelineLayout CreatePipelineLayout() const;
- UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate() const;
+ vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
- UniqueShaderModule CreateShaderModule(const std::vector<u32>& code) const;
+ vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
- UniquePipeline CreatePipeline() const;
+ vk::Pipeline CreatePipeline() const;
const VKDevice& device;
VKScheduler& scheduler;
ShaderEntries entries;
- UniqueDescriptorSetLayout descriptor_set_layout;
+ vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
VKUpdateDescriptorQueue& update_descriptor_queue;
- UniquePipelineLayout layout;
- UniqueDescriptorUpdateTemplate descriptor_template;
- UniqueShaderModule shader_module;
- UniquePipeline pipeline;
+ vk::PipelineLayout layout;
+ vk::DescriptorUpdateTemplateKHR descriptor_template;
+ vk::ShaderModule shader_module;
+ vk::Pipeline pipeline;
};
} // namespace Vulkan
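Throughout this header the former Unique* aliases become owning wrapper types with one access convention, visible in the call sites above: dereference for the raw handle, address() for a pointer to it, and a boolean test for emptiness. A short usage-only sketch (the wrapper's internals are assumed):

    void AccessForms(const vk::Pipeline& pipeline, const vk::DescriptorSetLayout& layout) {
        const VkPipeline raw = *pipeline;                    // raw handle, as in GetHandle()
        const VkDescriptorSetLayout* ptr = layout.address(); // for pSetLayouts-style arrays
        if (!pipeline) {
            // default-constructed or moved-from wrappers test false,
            // as in the descriptor_template checks above
        }
        (void)raw;
        (void)ptr;
    }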
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index cc7c281a0..e9d528aa6 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -6,10 +6,10 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -17,19 +17,18 @@ namespace Vulkan {
constexpr std::size_t SETS_GROW_RATE = 0x20;
DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool,
- vk::DescriptorSetLayout layout)
+ VkDescriptorSetLayout layout)
: VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {}
DescriptorAllocator::~DescriptorAllocator() = default;
-vk::DescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
- return *descriptors[CommitResource(fence)];
+VkDescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
+ const std::size_t index = CommitResource(fence);
+ return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
}
void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
- auto new_sets = descriptor_pool.AllocateDescriptors(layout, end - begin);
- descriptors.insert(descriptors.end(), std::make_move_iterator(new_sets.begin()),
- std::make_move_iterator(new_sets.end()));
+ descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
}
VKDescriptorPool::VKDescriptorPool(const VKDevice& device)
@@ -37,53 +36,50 @@ VKDescriptorPool::VKDescriptorPool(const VKDevice& device)
VKDescriptorPool::~VKDescriptorPool() = default;
-vk::DescriptorPool VKDescriptorPool::AllocateNewPool() {
+vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
static constexpr u32 num_sets = 0x20000;
- static constexpr vk::DescriptorPoolSize pool_sizes[] = {
- {vk::DescriptorType::eUniformBuffer, num_sets * 90},
- {vk::DescriptorType::eStorageBuffer, num_sets * 60},
- {vk::DescriptorType::eUniformTexelBuffer, num_sets * 64},
- {vk::DescriptorType::eCombinedImageSampler, num_sets * 64},
- {vk::DescriptorType::eStorageImage, num_sets * 40}};
-
- const vk::DescriptorPoolCreateInfo create_info(
- vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, num_sets,
- static_cast<u32>(std::size(pool_sizes)), std::data(pool_sizes));
- const auto dev = device.GetLogical();
- return *pools.emplace_back(
- dev.createDescriptorPoolUnique(create_info, nullptr, device.GetDispatchLoader()));
+ static constexpr VkDescriptorPoolSize pool_sizes[] = {
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
+ {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
+ {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
+ {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
+ {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
+
+ VkDescriptorPoolCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+ ci.maxSets = num_sets;
+ ci.poolSizeCount = static_cast<u32>(std::size(pool_sizes));
+ ci.pPoolSizes = std::data(pool_sizes);
+ return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
}
-std::vector<UniqueDescriptorSet> VKDescriptorPool::AllocateDescriptors(
- vk::DescriptorSetLayout layout, std::size_t count) {
- std::vector layout_copies(count, layout);
- vk::DescriptorSetAllocateInfo allocate_info(active_pool, static_cast<u32>(count),
- layout_copies.data());
-
- std::vector<vk::DescriptorSet> sets(count);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- switch (const auto result = dev.allocateDescriptorSets(&allocate_info, sets.data(), dld)) {
- case vk::Result::eSuccess:
- break;
- case vk::Result::eErrorOutOfPoolMemory:
- active_pool = AllocateNewPool();
- allocate_info.descriptorPool = active_pool;
- if (dev.allocateDescriptorSets(&allocate_info, sets.data(), dld) == vk::Result::eSuccess) {
- break;
- }
- [[fallthrough]];
- default:
- vk::throwResultException(result, "vk::Device::allocateDescriptorSetsUnique");
+vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
+ std::size_t count) {
+ const std::vector layout_copies(count, layout);
+ VkDescriptorSetAllocateInfo ai;
+ ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ ai.pNext = nullptr;
+ ai.descriptorPool = **active_pool;
+ ai.descriptorSetCount = static_cast<u32>(count);
+ ai.pSetLayouts = layout_copies.data();
+
+ vk::DescriptorSets sets = active_pool->Allocate(ai);
+ if (!sets.IsOutOfPoolMemory()) {
+ return sets;
}
- vk::PoolFree deleter(dev, active_pool, dld);
- std::vector<UniqueDescriptorSet> unique_sets;
- unique_sets.reserve(count);
- for (const auto set : sets) {
- unique_sets.push_back(UniqueDescriptorSet{set, deleter});
+ // Our current pool is out of memory. Allocate a new one and retry
+ active_pool = AllocateNewPool();
+ ai.descriptorPool = **active_pool;
+ sets = active_pool->Allocate(ai);
+ if (!sets.IsOutOfPoolMemory()) {
+ return sets;
}
- return unique_sets;
+
+ // After allocating a new pool, we are out of memory again. We can't handle this from here.
+ throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
}
} // namespace Vulkan
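Commit recovers a descriptor set from the flat index handed back by CommitResource: because the fenced pool grows by exactly SETS_GROW_RATE sets per Allocate call, division selects the allocation and the remainder selects the set inside it. A worked sketch of the mapping:

    #include <cstddef>

    constexpr std::size_t SETS_GROW_RATE = 0x20; // 32 sets per allocation, as above

    struct Location {
        std::size_t allocation; // index into descriptors_allocations
        std::size_t offset;     // index inside that vk::DescriptorSets
    };

    constexpr Location Locate(std::size_t index) {
        return {index / SETS_GROW_RATE, index % SETS_GROW_RATE};
    }

    static_assert(Locate(0).allocation == 0 && Locate(0).offset == 0);
    static_assert(Locate(31).allocation == 0 && Locate(31).offset == 31);
    static_assert(Locate(70).allocation == 2 && Locate(70).offset == 6); // 70 = 2*32 + 6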
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index a441dbc0f..ab40c70f0 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -8,8 +8,8 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -17,21 +17,21 @@ class VKDescriptorPool;
class DescriptorAllocator final : public VKFencedPool {
public:
- explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, vk::DescriptorSetLayout layout);
+ explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
~DescriptorAllocator() override;
DescriptorAllocator(const DescriptorAllocator&) = delete;
- vk::DescriptorSet Commit(VKFence& fence);
+ VkDescriptorSet Commit(VKFence& fence);
protected:
void Allocate(std::size_t begin, std::size_t end) override;
private:
VKDescriptorPool& descriptor_pool;
- const vk::DescriptorSetLayout layout;
+ const VkDescriptorSetLayout layout;
- std::vector<UniqueDescriptorSet> descriptors;
+ std::vector<vk::DescriptorSets> descriptors_allocations;
};
class VKDescriptorPool final {
@@ -42,15 +42,14 @@ public:
~VKDescriptorPool();
private:
- vk::DescriptorPool AllocateNewPool();
+ vk::DescriptorPool* AllocateNewPool();
- std::vector<UniqueDescriptorSet> AllocateDescriptors(vk::DescriptorSetLayout layout,
- std::size_t count);
+ vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
const VKDevice& device;
- std::vector<UniqueDescriptorPool> pools;
- vk::DescriptorPool active_pool;
+ std::vector<vk::DescriptorPool> pools;
+ vk::DescriptorPool* active_pool;
};
} // namespace Vulkan
\ No newline at end of file
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 28d2fbc4f..52d29e49d 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -6,14 +6,15 @@
#include <chrono>
#include <cstdlib>
#include <optional>
-#include <set>
#include <string_view>
#include <thread>
+#include <unordered_set>
#include <vector>
+
#include "common/assert.h"
#include "core/settings.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -21,49 +22,43 @@ namespace {
namespace Alternatives {
-constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint,
- vk::Format::eD16UnormS8Uint, vk::Format{}};
-constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint,
- vk::Format::eD32SfloatS8Uint, vk::Format{}};
+constexpr std::array Depth24UnormS8_UINT = {VK_FORMAT_D32_SFLOAT_S8_UINT,
+ VK_FORMAT_D16_UNORM_S8_UINT, VkFormat{}};
+constexpr std::array Depth16UnormS8_UINT = {VK_FORMAT_D24_UNORM_S8_UINT,
+ VK_FORMAT_D32_SFLOAT_S8_UINT, VkFormat{}};
} // namespace Alternatives
+constexpr std::array REQUIRED_EXTENSIONS = {
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
+ VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
+ VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
+ VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+ VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
+ VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
+ VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
+ VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
+};
+
template <typename T>
void SetNext(void**& next, T& data) {
*next = &data;
next = &data.pNext;
}
-template <typename T>
-T GetFeatures(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) {
- vk::PhysicalDeviceFeatures2 features;
- T extension_features;
- features.pNext = &extension_features;
- physical.getFeatures2(&features, dldi);
- return extension_features;
-}
-
-template <typename T>
-T GetProperties(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dldi) {
- vk::PhysicalDeviceProperties2 properties;
- T extension_properties;
- properties.pNext = &extension_properties;
- physical.getProperties2(&properties, dldi);
- return extension_properties;
-}
-
-constexpr const vk::Format* GetFormatAlternatives(vk::Format format) {
+constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
switch (format) {
- case vk::Format::eD24UnormS8Uint:
- return Alternatives::Depth24UnormS8Uint.data();
- case vk::Format::eD16UnormS8Uint:
- return Alternatives::Depth16UnormS8Uint.data();
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ return Alternatives::Depth24UnormS8_UINT.data();
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ return Alternatives::Depth16UnormS8_UINT.data();
default:
return nullptr;
}
}
-vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, FormatType format_type) {
+VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType format_type) {
switch (format_type) {
case FormatType::Linear:
return properties.linearTilingFeatures;
@@ -76,79 +71,220 @@ vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, Format
}
}
+std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
+ vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
+ static constexpr std::array formats{VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_UINT_PACK32,
+ VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+ VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ VK_FORMAT_B5G6R5_UNORM_PACK16,
+ VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ VK_FORMAT_R32G32B32A32_UINT,
+ VK_FORMAT_R32G32_SFLOAT,
+ VK_FORMAT_R32G32_UINT,
+ VK_FORMAT_R16G16B16A16_UINT,
+ VK_FORMAT_R16G16B16A16_SNORM,
+ VK_FORMAT_R16G16B16A16_UNORM,
+ VK_FORMAT_R16G16_UNORM,
+ VK_FORMAT_R16G16_SNORM,
+ VK_FORMAT_R16G16_SFLOAT,
+ VK_FORMAT_R16_UNORM,
+ VK_FORMAT_R8G8B8A8_SRGB,
+ VK_FORMAT_R8G8_UNORM,
+ VK_FORMAT_R8G8_SNORM,
+ VK_FORMAT_R8_UNORM,
+ VK_FORMAT_R8_UINT,
+ VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ VK_FORMAT_R32_SFLOAT,
+ VK_FORMAT_R32_UINT,
+ VK_FORMAT_R32_SINT,
+ VK_FORMAT_R16_SFLOAT,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ VK_FORMAT_B8G8R8A8_UNORM,
+ VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+ VK_FORMAT_D32_SFLOAT,
+ VK_FORMAT_D16_UNORM,
+ VK_FORMAT_D16_UNORM_S8_UINT,
+ VK_FORMAT_D24_UNORM_S8_UINT,
+ VK_FORMAT_D32_SFLOAT_S8_UINT,
+ VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
+ VK_FORMAT_BC2_UNORM_BLOCK,
+ VK_FORMAT_BC3_UNORM_BLOCK,
+ VK_FORMAT_BC4_UNORM_BLOCK,
+ VK_FORMAT_BC5_UNORM_BLOCK,
+ VK_FORMAT_BC5_SNORM_BLOCK,
+ VK_FORMAT_BC7_UNORM_BLOCK,
+ VK_FORMAT_BC6H_UFLOAT_BLOCK,
+ VK_FORMAT_BC6H_SFLOAT_BLOCK,
+ VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
+ VK_FORMAT_BC2_SRGB_BLOCK,
+ VK_FORMAT_BC3_SRGB_BLOCK,
+ VK_FORMAT_BC7_SRGB_BLOCK,
+ VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
+ VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
+ VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
+ VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
+ VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ VK_FORMAT_E5B9G9R9_UFLOAT_PACK32};
+ std::unordered_map<VkFormat, VkFormatProperties> format_properties;
+ for (const auto format : formats) {
+ format_properties.emplace(format, physical.GetFormatProperties(format));
+ }
+ return format_properties;
+}
+
} // Anonymous namespace
-VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical,
- vk::SurfaceKHR surface)
- : physical{physical}, properties{physical.getProperties(dldi)},
- format_properties{GetFormatProperties(dldi, physical)} {
- SetupFamilies(dldi, surface);
- SetupFeatures(dldi);
+VKDevice::VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
+ const vk::InstanceDispatch& dld)
+ : dld{dld}, physical{physical}, properties{physical.GetProperties()},
+ format_properties{GetFormatProperties(physical, dld)} {
+ SetupFamilies(surface);
+ SetupFeatures();
}
VKDevice::~VKDevice() = default;
-bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance) {
+bool VKDevice::Create() {
const auto queue_cis = GetDeviceQueueCreateInfos();
- const std::vector extensions = LoadExtensions(dldi);
+ const std::vector extensions = LoadExtensions();
- vk::PhysicalDeviceFeatures2 features2;
+ VkPhysicalDeviceFeatures2 features2;
+ features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+ features2.pNext = nullptr;
void** next = &features2.pNext;
+
auto& features = features2.features;
- features.vertexPipelineStoresAndAtomics = true;
+ features.robustBufferAccess = false;
+ features.fullDrawIndexUint32 = false;
+ features.imageCubeArray = false;
features.independentBlend = true;
+ features.geometryShader = true;
+ features.tessellationShader = true;
+ features.sampleRateShading = false;
+ features.dualSrcBlend = false;
+ features.logicOp = false;
+ features.multiDrawIndirect = false;
+ features.drawIndirectFirstInstance = false;
features.depthClamp = true;
- features.samplerAnisotropy = true;
+ features.depthBiasClamp = true;
+ features.fillModeNonSolid = false;
+ features.depthBounds = false;
+ features.wideLines = false;
features.largePoints = true;
+ features.alphaToOne = false;
features.multiViewport = true;
- features.depthBiasClamp = true;
- features.geometryShader = true;
- features.tessellationShader = true;
+ features.samplerAnisotropy = true;
+ features.textureCompressionETC2 = false;
+ features.textureCompressionASTC_LDR = is_optimal_astc_supported;
+ features.textureCompressionBC = false;
features.occlusionQueryPrecise = true;
+ features.pipelineStatisticsQuery = false;
+ features.vertexPipelineStoresAndAtomics = true;
features.fragmentStoresAndAtomics = true;
+ features.shaderTessellationAndGeometryPointSize = false;
features.shaderImageGatherExtended = true;
+ features.shaderStorageImageExtendedFormats = false;
+ features.shaderStorageImageMultisample = false;
features.shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported;
features.shaderStorageImageWriteWithoutFormat = true;
- features.textureCompressionASTC_LDR = is_optimal_astc_supported;
-
- vk::PhysicalDevice16BitStorageFeaturesKHR bit16_storage;
+ features.shaderUniformBufferArrayDynamicIndexing = false;
+ features.shaderSampledImageArrayDynamicIndexing = false;
+ features.shaderStorageBufferArrayDynamicIndexing = false;
+ features.shaderStorageImageArrayDynamicIndexing = false;
+ features.shaderClipDistance = false;
+ features.shaderCullDistance = false;
+ features.shaderFloat64 = false;
+ features.shaderInt64 = false;
+ features.shaderInt16 = false;
+ features.shaderResourceResidency = false;
+ features.shaderResourceMinLod = false;
+ features.sparseBinding = false;
+ features.sparseResidencyBuffer = false;
+ features.sparseResidencyImage2D = false;
+ features.sparseResidencyImage3D = false;
+ features.sparseResidency2Samples = false;
+ features.sparseResidency4Samples = false;
+ features.sparseResidency8Samples = false;
+ features.sparseResidency16Samples = false;
+ features.sparseResidencyAliased = false;
+ features.variableMultisampleRate = false;
+ features.inheritedQueries = false;
+
+ VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage;
+ bit16_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR;
+ bit16_storage.pNext = nullptr;
+ bit16_storage.storageBuffer16BitAccess = false;
bit16_storage.uniformAndStorageBuffer16BitAccess = true;
+ bit16_storage.storagePushConstant16 = false;
+ bit16_storage.storageInputOutput16 = false;
SetNext(next, bit16_storage);
- vk::PhysicalDevice8BitStorageFeaturesKHR bit8_storage;
+ VkPhysicalDevice8BitStorageFeaturesKHR bit8_storage;
+ bit8_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR;
+ bit8_storage.pNext = nullptr;
+ bit8_storage.storageBuffer8BitAccess = false;
bit8_storage.uniformAndStorageBuffer8BitAccess = true;
+ bit8_storage.storagePushConstant8 = false;
SetNext(next, bit8_storage);
- vk::PhysicalDeviceHostQueryResetFeaturesEXT host_query_reset;
+ VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset;
+ host_query_reset.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
+ host_query_reset.pNext = nullptr;
+ host_query_reset.hostQueryReset = true;
SetNext(next, host_query_reset);
- vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
+ VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
if (is_float16_supported) {
+ float16_int8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
+ float16_int8.pNext = nullptr;
float16_int8.shaderFloat16 = true;
+ float16_int8.shaderInt8 = false;
SetNext(next, float16_int8);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively");
}
- vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
+ VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
if (khr_uniform_buffer_standard_layout) {
+ std430_layout.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR;
+ std430_layout.pNext = nullptr;
std430_layout.uniformBufferStandardLayout = true;
SetNext(next, std430_layout);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs");
}
- vk::PhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
+ VkPhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
if (ext_index_type_uint8) {
+ index_type_uint8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT;
+ index_type_uint8.pNext = nullptr;
index_type_uint8.indexTypeUint8 = true;
SetNext(next, index_type_uint8);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes");
}
- vk::PhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback;
+ VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback;
if (ext_transform_feedback) {
+ transform_feedback.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
+ transform_feedback.pNext = nullptr;
transform_feedback.transformFeedback = true;
transform_feedback.geometryStreams = true;
SetNext(next, transform_feedback);
@@ -160,62 +296,48 @@ bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instan
LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
}
- vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0,
- nullptr, static_cast<u32>(extensions.size()), extensions.data(),
- nullptr);
- device_ci.pNext = &features2;
-
- vk::Device dummy_logical;
- if (physical.createDevice(&device_ci, nullptr, &dummy_logical, dldi) != vk::Result::eSuccess) {
- LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!");
+ logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld);
+ if (!logical) {
+ LOG_ERROR(Render_Vulkan, "Failed to create logical device");
return false;
}
- dld.init(instance, dldi.vkGetInstanceProcAddr, dummy_logical, dldi.vkGetDeviceProcAddr);
- logical = UniqueDevice(
- dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld));
-
CollectTelemetryParameters();
- graphics_queue = logical->getQueue(graphics_family, 0, dld);
- present_queue = logical->getQueue(present_family, 0, dld);
+ graphics_queue = logical.GetQueue(graphics_family);
+ present_queue = logical.GetQueue(present_family);
return true;
}
-vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format,
- vk::FormatFeatureFlags wanted_usage,
- FormatType format_type) const {
+VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
+ FormatType format_type) const {
if (IsFormatSupported(wanted_format, wanted_usage, format_type)) {
return wanted_format;
}
// The wanted format is not supported by hardware, search for alternatives
- const vk::Format* alternatives = GetFormatAlternatives(wanted_format);
+ const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
if (alternatives == nullptr) {
UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
"hardware does not support it",
- vk::to_string(wanted_format), vk::to_string(wanted_usage),
- static_cast<u32>(format_type));
+ wanted_format, wanted_usage, format_type);
return wanted_format;
}
std::size_t i = 0;
- for (vk::Format alternative = alternatives[0]; alternative != vk::Format{};
- alternative = alternatives[++i]) {
+ for (VkFormat alternative = *alternatives; alternative; alternative = alternatives[++i]) {
if (!IsFormatSupported(alternative, wanted_usage, format_type)) {
continue;
}
LOG_WARNING(Render_Vulkan,
"Emulating format={} with alternative format={} with usage={} and type={}",
- static_cast<u32>(wanted_format), static_cast<u32>(alternative),
- static_cast<u32>(wanted_usage), static_cast<u32>(format_type));
+ wanted_format, alternative, wanted_usage, format_type);
return alternative;
}
// No alternatives found, panic
UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
"doesn't support any of the alternatives",
- static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage),
- static_cast<u32>(format_type));
+ wanted_format, wanted_usage, format_type);
return wanted_format;
}
@@ -229,35 +351,39 @@ void VKDevice::ReportLoss() const {
return;
}
- [[maybe_unused]] const std::vector data = graphics_queue.getCheckpointDataNV(dld);
+ [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld);
// Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be
// executed. It can be done on a debugger by evaluating the expression:
// *(VKGraphicsPipeline*)data[0]
}
-bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
- const vk::DispatchLoaderDynamic& dldi) const {
+bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
// Disable for now to avoid converting ASTC twice.
- return false;
static constexpr std::array astc_formats = {
- vk::Format::eAstc4x4SrgbBlock, vk::Format::eAstc8x8SrgbBlock,
- vk::Format::eAstc8x5SrgbBlock, vk::Format::eAstc5x4SrgbBlock,
- vk::Format::eAstc5x5UnormBlock, vk::Format::eAstc5x5SrgbBlock,
- vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock,
- vk::Format::eAstc6x6UnormBlock, vk::Format::eAstc6x6SrgbBlock,
- vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock,
- vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock,
- vk::Format::eAstc8x6UnormBlock, vk::Format::eAstc8x6SrgbBlock,
- vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock};
+ VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x4_UNORM_BLOCK, VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
+ VK_FORMAT_ASTC_5x5_UNORM_BLOCK, VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x5_UNORM_BLOCK, VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_6x6_UNORM_BLOCK, VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x5_UNORM_BLOCK, VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x6_UNORM_BLOCK, VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_8x8_UNORM_BLOCK, VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x5_UNORM_BLOCK, VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x6_UNORM_BLOCK, VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x8_UNORM_BLOCK, VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
+ VK_FORMAT_ASTC_10x10_UNORM_BLOCK, VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x10_UNORM_BLOCK, VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
+ VK_FORMAT_ASTC_12x12_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+ };
if (!features.textureCompressionASTC_LDR) {
return false;
}
const auto format_feature_usage{
- vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc |
- vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc |
- vk::FormatFeatureFlagBits::eTransferDst};
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
+ VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+ VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
for (const auto format : astc_formats) {
- const auto format_properties{physical.getFormatProperties(format, dldi)};
+ const auto format_properties{physical.GetFormatProperties(format)};
if (!(format_properties.optimalTilingFeatures & format_feature_usage)) {
return false;
}
@@ -265,62 +391,49 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features
return true;
}
-bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage,
+bool VKDevice::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const {
const auto it = format_properties.find(wanted_format);
if (it == format_properties.end()) {
- UNIMPLEMENTED_MSG("Unimplemented format query={}", vk::to_string(wanted_format));
+ UNIMPLEMENTED_MSG("Unimplemented format query={}", wanted_format);
return true;
}
const auto supported_usage = GetFormatFeatures(it->second, format_type);
return (supported_usage & wanted_usage) == wanted_usage;
}
-bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical,
- vk::SurfaceKHR surface) {
+bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface) {
bool is_suitable = true;
+ std::bitset<REQUIRED_EXTENSIONS.size()> available_extensions;
- constexpr std::array required_extensions = {
- VK_KHR_SWAPCHAIN_EXTENSION_NAME,
- VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
- VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
- VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
- VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
- VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
- VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
- VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
- };
- std::bitset<required_extensions.size()> available_extensions{};
-
- for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) {
- for (std::size_t i = 0; i < required_extensions.size(); ++i) {
+ for (const auto& prop : physical.EnumerateDeviceExtensionProperties()) {
+ for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
if (available_extensions[i]) {
continue;
}
- available_extensions[i] =
- required_extensions[i] == std::string_view{prop.extensionName};
+ const std::string_view name{prop.extensionName};
+ available_extensions[i] = name == REQUIRED_EXTENSIONS[i];
}
}
if (!available_extensions.all()) {
- for (std::size_t i = 0; i < required_extensions.size(); ++i) {
+ for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
if (available_extensions[i]) {
continue;
}
- LOG_ERROR(Render_Vulkan, "Missing required extension: {}", required_extensions[i]);
+ LOG_ERROR(Render_Vulkan, "Missing required extension: {}", REQUIRED_EXTENSIONS[i]);
is_suitable = false;
}
}
bool has_graphics{}, has_present{};
- const auto queue_family_properties = physical.getQueueFamilyProperties(dldi);
+ const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
const auto& family = queue_family_properties[i];
if (family.queueCount == 0) {
continue;
}
- has_graphics |=
- (family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0);
- has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0;
+ has_graphics |= family.queueFlags & VK_QUEUE_GRAPHICS_BIT;
+ has_present |= physical.GetSurfaceSupportKHR(i, surface);
}
if (!has_graphics || !has_present) {
LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue");
@@ -328,7 +441,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev
}
// TODO(Rodrigo): Check if the device matches all requirements.
- const auto properties{physical.getProperties(dldi)};
+ const auto properties{physical.GetProperties()};
const auto& limits{properties.limits};
constexpr u32 required_ubo_size = 65536;
@@ -345,7 +458,7 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev
is_suitable = false;
}
- const auto features{physical.getFeatures(dldi)};
+ const auto features{physical.GetFeatures()};
const std::array feature_report = {
std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"),
std::make_pair(features.independentBlend, "independentBlend"),
@@ -377,9 +490,9 @@ bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDev
return is_suitable;
}
-std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynamic& dldi) {
+std::vector<const char*> VKDevice::LoadExtensions() {
std::vector<const char*> extensions;
- const auto Test = [&](const vk::ExtensionProperties& extension,
+ const auto Test = [&](const VkExtensionProperties& extension,
std::optional<std::reference_wrapper<bool>> status, const char* name,
bool push) {
if (extension.extensionName != std::string_view(name)) {
@@ -393,22 +506,13 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
}
};
- extensions.reserve(15);
- extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
- extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME);
- extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME);
- extensions.push_back(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME);
- extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME);
- extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME);
- extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME);
- extensions.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
-
- [[maybe_unused]] const bool nsight =
- std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED");
+ extensions.reserve(7 + REQUIRED_EXTENSIONS.size());
+ extensions.insert(extensions.begin(), REQUIRED_EXTENSIONS.begin(), REQUIRED_EXTENSIONS.end());
+
bool has_khr_shader_float16_int8{};
bool has_ext_subgroup_size_control{};
bool has_ext_transform_feedback{};
- for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) {
+ for (const auto& extension : physical.EnumerateDeviceExtensionProperties()) {
Test(extension, khr_uniform_buffer_standard_layout,
VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
Test(extension, has_khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME,
@@ -428,38 +532,67 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
}
}
+ VkPhysicalDeviceFeatures2KHR features;
+ features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
+
+ VkPhysicalDeviceProperties2KHR properties;
+ properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
+
if (has_khr_shader_float16_int8) {
- is_float16_supported =
- GetFeatures<vk::PhysicalDeviceFloat16Int8FeaturesKHR>(physical, dldi).shaderFloat16;
+ VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8_features;
+ float16_int8_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
+ float16_int8_features.pNext = nullptr;
+ features.pNext = &float16_int8_features;
+
+ physical.GetFeatures2KHR(features);
+ is_float16_supported = float16_int8_features.shaderFloat16;
extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
}
if (has_ext_subgroup_size_control) {
- const auto features =
- GetFeatures<vk::PhysicalDeviceSubgroupSizeControlFeaturesEXT>(physical, dldi);
- const auto properties =
- GetProperties<vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT>(physical, dldi);
-
- is_warp_potentially_bigger = properties.maxSubgroupSize > GuestWarpSize;
-
- if (features.subgroupSizeControl && properties.minSubgroupSize <= GuestWarpSize &&
- properties.maxSubgroupSize >= GuestWarpSize) {
+ VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_features;
+ subgroup_features.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
+ subgroup_features.pNext = nullptr;
+ features.pNext = &subgroup_features;
+ physical.GetFeatures2KHR(features);
+
+ VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_properties;
+ subgroup_properties.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
+ subgroup_properties.pNext = nullptr;
+ properties.pNext = &subgroup_properties;
+ physical.GetProperties2KHR(properties);
+
+ is_warp_potentially_bigger = subgroup_properties.maxSubgroupSize > GuestWarpSize;
+
+ if (subgroup_features.subgroupSizeControl &&
+ subgroup_properties.minSubgroupSize <= GuestWarpSize &&
+ subgroup_properties.maxSubgroupSize >= GuestWarpSize) {
extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
- guest_warp_stages = properties.requiredSubgroupSizeStages;
+ guest_warp_stages = subgroup_properties.requiredSubgroupSizeStages;
}
} else {
is_warp_potentially_bigger = true;
}
if (has_ext_transform_feedback) {
- const auto features =
- GetFeatures<vk::PhysicalDeviceTransformFeedbackFeaturesEXT>(physical, dldi);
- const auto properties =
- GetProperties<vk::PhysicalDeviceTransformFeedbackPropertiesEXT>(physical, dldi);
-
- if (features.transformFeedback && features.geometryStreams &&
- properties.maxTransformFeedbackStreams >= 4 && properties.maxTransformFeedbackBuffers &&
- properties.transformFeedbackQueries && properties.transformFeedbackDraw) {
+ VkPhysicalDeviceTransformFeedbackFeaturesEXT tfb_features;
+ tfb_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
+ tfb_features.pNext = nullptr;
+ features.pNext = &tfb_features;
+ physical.GetFeatures2KHR(features);
+
+ VkPhysicalDeviceTransformFeedbackPropertiesEXT tfb_properties;
+ tfb_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
+ tfb_properties.pNext = nullptr;
+ properties.pNext = &tfb_properties;
+ physical.GetProperties2KHR(properties);
+
+ if (tfb_features.transformFeedback && tfb_features.geometryStreams &&
+ tfb_properties.maxTransformFeedbackStreams >= 4 &&
+ tfb_properties.maxTransformFeedbackBuffers && tfb_properties.transformFeedbackQueries &&
+ tfb_properties.transformFeedbackDraw) {
extensions.push_back(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
ext_transform_feedback = true;
}
@@ -468,10 +601,10 @@ std::vector<const char*> VKDevice::LoadExtensions(const vk::DispatchLoaderDynami
return extensions;
}
-void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface) {
+void VKDevice::SetupFamilies(VkSurfaceKHR surface) {
std::optional<u32> graphics_family_, present_family_;
- const auto queue_family_properties = physical.getQueueFamilyProperties(dldi);
+ const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
if (graphics_family_ && present_family_)
break;
@@ -480,10 +613,12 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK
if (queue_family.queueCount == 0)
continue;
- if (queue_family.queueFlags & vk::QueueFlagBits::eGraphics)
+ if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
graphics_family_ = i;
- if (physical.getSurfaceSupportKHR(i, surface, dldi))
+ }
+ if (physical.GetSurfaceSupportKHR(i, surface)) {
present_family_ = i;
+ }
}
ASSERT(graphics_family_ && present_family_);
@@ -491,111 +626,49 @@ void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceK
present_family = *present_family_;
}
-void VKDevice::SetupFeatures(const vk::DispatchLoaderDynamic& dldi) {
- const auto supported_features{physical.getFeatures(dldi)};
+void VKDevice::SetupFeatures() {
+ const auto supported_features{physical.GetFeatures()};
is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
- is_optimal_astc_supported = IsOptimalAstcSupported(supported_features, dldi);
+ is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
}
void VKDevice::CollectTelemetryParameters() {
- const auto driver = GetProperties<vk::PhysicalDeviceDriverPropertiesKHR>(physical, dld);
+ VkPhysicalDeviceDriverPropertiesKHR driver;
+ driver.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
+ driver.pNext = nullptr;
+
+ VkPhysicalDeviceProperties2KHR properties;
+ properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
+ properties.pNext = &driver;
+ physical.GetProperties2KHR(properties);
+
driver_id = driver.driverID;
vendor_name = driver.driverName;
- const auto extensions = physical.enumerateDeviceExtensionProperties(nullptr, dld);
+ const std::vector extensions = physical.EnumerateDeviceExtensionProperties();
reported_extensions.reserve(std::size(extensions));
for (const auto& extension : extensions) {
reported_extensions.push_back(extension.extensionName);
}
}
-std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const {
- static const float QUEUE_PRIORITY = 1.0f;
+std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const {
+ static constexpr float QUEUE_PRIORITY = 1.0f;
- std::set<u32> unique_queue_families = {graphics_family, present_family};
- std::vector<vk::DeviceQueueCreateInfo> queue_cis;
+ std::unordered_set<u32> unique_queue_families = {graphics_family, present_family};
+ std::vector<VkDeviceQueueCreateInfo> queue_cis;
- for (u32 queue_family : unique_queue_families)
- queue_cis.push_back({{}, queue_family, 1, &QUEUE_PRIORITY});
+ for (const u32 queue_family : unique_queue_families) {
+ VkDeviceQueueCreateInfo& ci = queue_cis.emplace_back();
+ ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.queueFamilyIndex = queue_family;
+ ci.queueCount = 1;
+ ci.pQueuePriorities = &QUEUE_PRIORITY;
+ }
return queue_cis;
}
-std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
- const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) {
- static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
- vk::Format::eA8B8G8R8UintPack32,
- vk::Format::eA8B8G8R8SnormPack32,
- vk::Format::eA8B8G8R8SrgbPack32,
- vk::Format::eB5G6R5UnormPack16,
- vk::Format::eA2B10G10R10UnormPack32,
- vk::Format::eA1R5G5B5UnormPack16,
- vk::Format::eR32G32B32A32Sfloat,
- vk::Format::eR32G32B32A32Uint,
- vk::Format::eR32G32Sfloat,
- vk::Format::eR32G32Uint,
- vk::Format::eR16G16B16A16Uint,
- vk::Format::eR16G16B16A16Snorm,
- vk::Format::eR16G16B16A16Unorm,
- vk::Format::eR16G16Unorm,
- vk::Format::eR16G16Snorm,
- vk::Format::eR16G16Sfloat,
- vk::Format::eR16Unorm,
- vk::Format::eR8G8B8A8Srgb,
- vk::Format::eR8G8Unorm,
- vk::Format::eR8G8Snorm,
- vk::Format::eR8Unorm,
- vk::Format::eR8Uint,
- vk::Format::eB10G11R11UfloatPack32,
- vk::Format::eR32Sfloat,
- vk::Format::eR32Uint,
- vk::Format::eR32Sint,
- vk::Format::eR16Sfloat,
- vk::Format::eR16G16B16A16Sfloat,
- vk::Format::eB8G8R8A8Unorm,
- vk::Format::eR4G4B4A4UnormPack16,
- vk::Format::eD32Sfloat,
- vk::Format::eD16Unorm,
- vk::Format::eD16UnormS8Uint,
- vk::Format::eD24UnormS8Uint,
- vk::Format::eD32SfloatS8Uint,
- vk::Format::eBc1RgbaUnormBlock,
- vk::Format::eBc2UnormBlock,
- vk::Format::eBc3UnormBlock,
- vk::Format::eBc4UnormBlock,
- vk::Format::eBc5UnormBlock,
- vk::Format::eBc5SnormBlock,
- vk::Format::eBc7UnormBlock,
- vk::Format::eBc6HUfloatBlock,
- vk::Format::eBc6HSfloatBlock,
- vk::Format::eBc1RgbaSrgbBlock,
- vk::Format::eBc2SrgbBlock,
- vk::Format::eBc3SrgbBlock,
- vk::Format::eBc7SrgbBlock,
- vk::Format::eAstc4x4SrgbBlock,
- vk::Format::eAstc8x8SrgbBlock,
- vk::Format::eAstc8x5SrgbBlock,
- vk::Format::eAstc5x4SrgbBlock,
- vk::Format::eAstc5x5UnormBlock,
- vk::Format::eAstc5x5SrgbBlock,
- vk::Format::eAstc10x8UnormBlock,
- vk::Format::eAstc10x8SrgbBlock,
- vk::Format::eAstc6x6UnormBlock,
- vk::Format::eAstc6x6SrgbBlock,
- vk::Format::eAstc10x10UnormBlock,
- vk::Format::eAstc10x10SrgbBlock,
- vk::Format::eAstc12x12UnormBlock,
- vk::Format::eAstc12x12SrgbBlock,
- vk::Format::eAstc8x6UnormBlock,
- vk::Format::eAstc8x6SrgbBlock,
- vk::Format::eAstc6x5UnormBlock,
- vk::Format::eAstc6x5SrgbBlock,
- vk::Format::eE5B9G9R9UfloatPack32};
- std::unordered_map<vk::Format, vk::FormatProperties> format_properties;
- for (const auto format : formats) {
- format_properties.emplace(format, physical.getFormatProperties(format, dldi));
- }
- return format_properties;
-}
-
} // namespace Vulkan
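The SetNext helper threads the pNext chain through a write cursor: each call links its struct at the current tail, then moves the cursor to that struct's own pNext, which is why the last struct in the chain must still be null-terminated explicitly. A minimal standalone sketch of the invariant (hypothetical Node type, same mechanism as above):

    #include <cassert>

    struct Node {
        void* pNext = nullptr;
    };

    template <typename T>
    void SetNext(void**& next, T& data) {
        *next = &data;      // link the new struct at the current tail
        next = &data.pNext; // cursor now points at the new tail's pNext
    }

    int main() {
        Node head, a, b;
        void** next = &head.pNext;
        SetNext(next, a);
        SetNext(next, b);
        assert(head.pNext == &a);
        assert(a.pNext == &b);
        assert(b.pNext == nullptr); // holds here only because Node zero-initializes pNext
        return 0;
    }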
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 6e656517f..60d64572a 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -8,8 +8,9 @@
#include <string_view>
#include <unordered_map>
#include <vector>
+
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -22,12 +23,12 @@ const u32 GuestWarpSize = 32;
/// Handles data specific to a physical device.
class VKDevice final {
public:
- explicit VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical,
- vk::SurfaceKHR surface);
+ explicit VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
+ const vk::InstanceDispatch& dld);
~VKDevice();
/// Initializes the device. Returns true on success.
- bool Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance);
+ bool Create();
/**
* Returns a format supported by the device for the passed requirements.
@@ -36,20 +37,20 @@ public:
* @param format_type Format type usage.
* @returns A format supported by the device.
*/
- vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage,
- FormatType format_type) const;
+ VkFormat GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
+ FormatType format_type) const;
/// Reports a device loss.
void ReportLoss() const;
/// Returns the dispatch loader with direct function pointers of the device.
- const vk::DispatchLoaderDynamic& GetDispatchLoader() const {
+ const vk::DeviceDispatch& GetDispatchLoader() const {
return dld;
}
/// Returns the logical device.
- vk::Device GetLogical() const {
- return logical.get();
+ const vk::Device& GetLogical() const {
+ return logical;
}
/// Returns the physical device.
@@ -79,7 +80,7 @@ public:
/// Returns true if the device is integrated with the host CPU.
bool IsIntegrated() const {
- return properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu;
+ return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
}
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
@@ -98,27 +99,27 @@ public:
}
/// Returns the driver ID.
- vk::DriverIdKHR GetDriverID() const {
+ VkDriverIdKHR GetDriverID() const {
return driver_id;
}
/// Returns uniform buffer alignment requirement.
- vk::DeviceSize GetUniformBufferAlignment() const {
+ VkDeviceSize GetUniformBufferAlignment() const {
return properties.limits.minUniformBufferOffsetAlignment;
}
/// Returns storage alignment requirement.
- vk::DeviceSize GetStorageBufferAlignment() const {
+ VkDeviceSize GetStorageBufferAlignment() const {
return properties.limits.minStorageBufferOffsetAlignment;
}
/// Returns the maximum range for storage buffers.
- vk::DeviceSize GetMaxStorageBufferRange() const {
+ VkDeviceSize GetMaxStorageBufferRange() const {
return properties.limits.maxStorageBufferRange;
}
/// Returns the maximum size for push constants.
- vk::DeviceSize GetMaxPushConstantsSize() const {
+ VkDeviceSize GetMaxPushConstantsSize() const {
return properties.limits.maxPushConstantsSize;
}
@@ -138,8 +139,8 @@ public:
}
/// Returns true if the device can be forced to use the guest warp size.
- bool IsGuestWarpSizeSupported(vk::ShaderStageFlagBits stage) const {
- return (guest_warp_stages & stage) != vk::ShaderStageFlags{};
+ bool IsGuestWarpSizeSupported(VkShaderStageFlagBits stage) const {
+ return guest_warp_stages & stage;
}
/// Returns true if formatless image load is supported.
@@ -188,50 +189,44 @@ public:
}
/// Checks if the physical device is suitable.
- static bool IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical,
- vk::SurfaceKHR surface);
+ static bool IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface);
private:
/// Loads extensions into a vector and stores available ones in this object.
- std::vector<const char*> LoadExtensions(const vk::DispatchLoaderDynamic& dldi);
+ std::vector<const char*> LoadExtensions();
/// Sets up queue families.
- void SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface);
+ void SetupFamilies(VkSurfaceKHR surface);
/// Sets up device features.
- void SetupFeatures(const vk::DispatchLoaderDynamic& dldi);
+ void SetupFeatures();
/// Collects telemetry information from the device.
void CollectTelemetryParameters();
/// Returns a list of queue initialization descriptors.
- std::vector<vk::DeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const;
+ std::vector<VkDeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const;
/// Returns true if ASTC textures are natively supported.
- bool IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
- const vk::DispatchLoaderDynamic& dldi) const;
+ bool IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const;
/// Returns true if a format is supported.
- bool IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage,
+ bool IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const;
- /// Returns the device properties for Vulkan formats.
- static std::unordered_map<vk::Format, vk::FormatProperties> GetFormatProperties(
- const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical);
-
- const vk::PhysicalDevice physical; ///< Physical device.
- vk::DispatchLoaderDynamic dld; ///< Device function pointers.
- vk::PhysicalDeviceProperties properties; ///< Device properties.
- UniqueDevice logical; ///< Logical device.
- vk::Queue graphics_queue; ///< Main graphics queue.
- vk::Queue present_queue; ///< Main present queue.
- u32 graphics_family{}; ///< Main graphics queue family index.
- u32 present_family{}; ///< Main present queue family index.
- vk::DriverIdKHR driver_id{}; ///< Driver ID.
-    vk::ShaderStageFlags guest_warp_stages{};     ///< Stages where the guest warp size can be forced.
- bool is_optimal_astc_supported{}; ///< Support for native ASTC.
- bool is_float16_supported{}; ///< Support for float16 arithmetics.
- bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
+ vk::DeviceDispatch dld; ///< Device function pointers.
+ vk::PhysicalDevice physical; ///< Physical device.
+ VkPhysicalDeviceProperties properties; ///< Device properties.
+ vk::Device logical; ///< Logical device.
+ vk::Queue graphics_queue; ///< Main graphics queue.
+ vk::Queue present_queue; ///< Main present queue.
+ u32 graphics_family{}; ///< Main graphics queue family index.
+ u32 present_family{}; ///< Main present queue family index.
+ VkDriverIdKHR driver_id{}; ///< Driver ID.
+    VkShaderStageFlags guest_warp_stages{};    ///< Stages where the guest warp size can be forced.
+ bool is_optimal_astc_supported{}; ///< Support for native ASTC.
+ bool is_float16_supported{}; ///< Support for float16 arithmetics.
+ bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
bool is_formatless_image_load_supported{}; ///< Support for shader image read without format.
bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs.
bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
@@ -245,7 +240,7 @@ private:
std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions.
/// Format properties dictionary.
- std::unordered_map<vk::Format, vk::FormatProperties> format_properties;
+ std::unordered_map<VkFormat, VkFormatProperties> format_properties;
};
} // namespace Vulkan
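
The practical effect of this header change is visible at every call site: the wrapper's vk::Device carries its own dispatch table, so the explicit DispatchLoaderDynamic argument disappears. A minimal before/after sketch of the two alternatives, modeled on the CreateImage call that appears later in this diff (illustrative only, not part of the commit):

    // Before: a Vulkan-Hpp handle plus an explicit dispatch loader on every call.
    const auto dev = device.GetLogical();
    const auto& dld = device.GetDispatchLoader();
    auto image = dev.createImageUnique(image_ci, nullptr, dld);

    // After: the wrapper's vk::Device owns its function pointers, so one
    // expression suffices and the RAII handle comes from the wrapper itself.
    vk::Image image = device.GetLogical().CreateImage(image_ci);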
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 6a02403c1..b540b838d 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -2,11 +2,13 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <array>
+#include <cstring>
#include <vector>
+
#include "common/assert.h"
#include "common/common_types.h"
#include "common/microprofile.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
@@ -16,6 +18,7 @@
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -23,21 +26,26 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
namespace {
-vk::StencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
- return vk::StencilOpState(MaxwellToVK::StencilOp(face.action_stencil_fail),
- MaxwellToVK::StencilOp(face.action_depth_pass),
- MaxwellToVK::StencilOp(face.action_depth_fail),
- MaxwellToVK::ComparisonOp(face.test_func), 0, 0, 0);
+VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
+ VkStencilOpState state;
+ state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail);
+ state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass);
+ state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail);
+ state.compareOp = MaxwellToVK::ComparisonOp(face.test_func);
+ state.compareMask = 0;
+ state.writeMask = 0;
+ state.reference = 0;
+ return state;
}
-bool SupportsPrimitiveRestart(vk::PrimitiveTopology topology) {
+bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
static constexpr std::array unsupported_topologies = {
- vk::PrimitiveTopology::ePointList,
- vk::PrimitiveTopology::eLineList,
- vk::PrimitiveTopology::eTriangleList,
- vk::PrimitiveTopology::eLineListWithAdjacency,
- vk::PrimitiveTopology::eTriangleListWithAdjacency,
- vk::PrimitiveTopology::ePatchList};
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
+ VK_PRIMITIVE_TOPOLOGY_PATCH_LIST};
return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies),
topology) == std::end(unsupported_topologies);
}
@@ -49,7 +57,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche
VKUpdateDescriptorQueue& update_descriptor_queue,
VKRenderPassCache& renderpass_cache,
const GraphicsPipelineCacheKey& key,
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
const SPIRVProgram& program)
: device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()},
descriptor_set_layout{CreateDescriptorSetLayout(bindings)},
@@ -63,7 +71,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche
VKGraphicsPipeline::~VKGraphicsPipeline() = default;
-vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
+VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
if (!descriptor_template) {
return {};
}
@@ -72,27 +80,32 @@ vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
return set;
}
-UniqueDescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const {
- const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci(
- {}, static_cast<u32>(bindings.size()), bindings.data());
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld);
+vk::DescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
+ vk::Span<VkDescriptorSetLayoutBinding> bindings) const {
+ VkDescriptorSetLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.bindingCount = bindings.size();
+ ci.pBindings = bindings.data();
+ return device.GetLogical().CreateDescriptorSetLayout(ci);
}
-UniquePipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
- const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout, 0,
- nullptr);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+vk::PipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
+ VkPipelineLayoutCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.setLayoutCount = 1;
+ ci.pSetLayouts = descriptor_set_layout.address();
+ ci.pushConstantRangeCount = 0;
+ ci.pPushConstantRanges = nullptr;
+ return device.GetLogical().CreatePipelineLayout(ci);
}
-UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
+vk::DescriptorUpdateTemplateKHR VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
const SPIRVProgram& program) const {
- std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
+ std::vector<VkDescriptorUpdateTemplateEntry> template_entries;
u32 binding = 0;
u32 offset = 0;
for (const auto& stage : program) {
@@ -102,38 +115,47 @@ UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplat
}
if (template_entries.empty()) {
// If the shader doesn't use descriptor sets, skip template creation.
- return UniqueDescriptorUpdateTemplate{};
+ return {};
}
- const vk::DescriptorUpdateTemplateCreateInfo template_ci(
- {}, static_cast<u32>(template_entries.size()), template_entries.data(),
- vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
- vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+ VkDescriptorUpdateTemplateCreateInfoKHR ci;
+ ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
+ ci.pDescriptorUpdateEntries = template_entries.data();
+ ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
+ ci.descriptorSetLayout = *descriptor_set_layout;
+ ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ ci.pipelineLayout = *layout;
+ ci.set = DESCRIPTOR_SET;
+ return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
}
-std::vector<UniqueShaderModule> VKGraphicsPipeline::CreateShaderModules(
+std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
const SPIRVProgram& program) const {
- std::vector<UniqueShaderModule> modules;
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
+ VkShaderModuleCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+
+ std::vector<vk::ShaderModule> modules;
+ modules.reserve(Maxwell::MaxShaderStage);
for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) {
const auto& stage = program[i];
if (!stage) {
continue;
}
- const vk::ShaderModuleCreateInfo module_ci({}, stage->code.size() * sizeof(u32),
- stage->code.data());
- modules.emplace_back(dev.createShaderModuleUnique(module_ci, nullptr, dld));
+
+ ci.codeSize = stage->code.size() * sizeof(u32);
+ ci.pCode = stage->code.data();
+ modules.push_back(device.GetLogical().CreateShaderModule(ci));
}
return modules;
}
-UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
- const SPIRVProgram& program) const {
+vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
+ const SPIRVProgram& program) const {
const auto& vi = fixed_state.vertex_input;
const auto& ia = fixed_state.input_assembly;
const auto& ds = fixed_state.depth_stencil;
@@ -141,19 +163,26 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
const auto& ts = fixed_state.tessellation;
const auto& rs = fixed_state.rasterizer;
- std::vector<vk::VertexInputBindingDescription> vertex_bindings;
- std::vector<vk::VertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
+ std::vector<VkVertexInputBindingDescription> vertex_bindings;
+ std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
for (std::size_t i = 0; i < vi.num_bindings; ++i) {
const auto& binding = vi.bindings[i];
const bool instanced = binding.divisor != 0;
- const auto rate = instanced ? vk::VertexInputRate::eInstance : vk::VertexInputRate::eVertex;
- vertex_bindings.emplace_back(binding.index, binding.stride, rate);
+ const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
+
+ auto& vertex_binding = vertex_bindings.emplace_back();
+ vertex_binding.binding = binding.index;
+ vertex_binding.stride = binding.stride;
+ vertex_binding.inputRate = rate;
+
if (instanced) {
- vertex_binding_divisors.emplace_back(binding.index, binding.divisor);
+ auto& binding_divisor = vertex_binding_divisors.emplace_back();
+ binding_divisor.binding = binding.index;
+ binding_divisor.divisor = binding.divisor;
}
}
- std::vector<vk::VertexInputAttributeDescription> vertex_attributes;
+ std::vector<VkVertexInputAttributeDescription> vertex_attributes;
const auto& input_attributes = program[0]->entries.attributes;
for (std::size_t i = 0; i < vi.num_attributes; ++i) {
const auto& attribute = vi.attributes[i];
@@ -161,109 +190,194 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
// Skip attributes not used by the vertex shaders.
continue;
}
- vertex_attributes.emplace_back(attribute.index, attribute.buffer,
- MaxwellToVK::VertexFormat(attribute.type, attribute.size),
- attribute.offset);
+ auto& vertex_attribute = vertex_attributes.emplace_back();
+ vertex_attribute.location = attribute.index;
+ vertex_attribute.binding = attribute.buffer;
+ vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size);
+ vertex_attribute.offset = attribute.offset;
}
- vk::PipelineVertexInputStateCreateInfo vertex_input_ci(
- {}, static_cast<u32>(vertex_bindings.size()), vertex_bindings.data(),
- static_cast<u32>(vertex_attributes.size()), vertex_attributes.data());
-
- const vk::PipelineVertexInputDivisorStateCreateInfoEXT vertex_input_divisor_ci(
- static_cast<u32>(vertex_binding_divisors.size()), vertex_binding_divisors.data());
+ VkPipelineVertexInputStateCreateInfo vertex_input_ci;
+ vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertex_input_ci.pNext = nullptr;
+ vertex_input_ci.flags = 0;
+ vertex_input_ci.vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size());
+ vertex_input_ci.pVertexBindingDescriptions = vertex_bindings.data();
+ vertex_input_ci.vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size());
+ vertex_input_ci.pVertexAttributeDescriptions = vertex_attributes.data();
+
+ VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci;
+ input_divisor_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
+ input_divisor_ci.pNext = nullptr;
+ input_divisor_ci.vertexBindingDivisorCount = static_cast<u32>(vertex_binding_divisors.size());
+ input_divisor_ci.pVertexBindingDivisors = vertex_binding_divisors.data();
if (!vertex_binding_divisors.empty()) {
- vertex_input_ci.pNext = &vertex_input_divisor_ci;
+ vertex_input_ci.pNext = &input_divisor_ci;
}
- const auto primitive_topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
- const vk::PipelineInputAssemblyStateCreateInfo input_assembly_ci(
- {}, primitive_topology,
- ia.primitive_restart_enable && SupportsPrimitiveRestart(primitive_topology));
-
- const vk::PipelineTessellationStateCreateInfo tessellation_ci({}, ts.patch_control_points);
-
- const vk::PipelineViewportStateCreateInfo viewport_ci({}, Maxwell::NumViewports, nullptr,
- Maxwell::NumViewports, nullptr);
-
- // TODO(Rodrigo): Find out what's the default register value for front face
- const vk::PipelineRasterizationStateCreateInfo rasterizer_ci(
- {}, rs.depth_clamp_enable, false, vk::PolygonMode::eFill,
- rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : vk::CullModeFlagBits::eNone,
- MaxwellToVK::FrontFace(rs.front_face), rs.depth_bias_enable, 0.0f, 0.0f, 0.0f, 1.0f);
-
- const vk::PipelineMultisampleStateCreateInfo multisampling_ci(
- {}, vk::SampleCountFlagBits::e1, false, 0.0f, nullptr, false, false);
-
- const vk::CompareOp depth_test_compare = ds.depth_test_enable
- ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
- : vk::CompareOp::eAlways;
-
- const vk::PipelineDepthStencilStateCreateInfo depth_stencil_ci(
- {}, ds.depth_test_enable, ds.depth_write_enable, depth_test_compare, ds.depth_bounds_enable,
- ds.stencil_enable, GetStencilFaceState(ds.front_stencil),
- GetStencilFaceState(ds.back_stencil), 0.0f, 0.0f);
-
- std::array<vk::PipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
+ VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
+ input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ input_assembly_ci.pNext = nullptr;
+ input_assembly_ci.flags = 0;
+ input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
+ input_assembly_ci.primitiveRestartEnable =
+ ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology);
+
+ VkPipelineTessellationStateCreateInfo tessellation_ci;
+ tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
+ tessellation_ci.pNext = nullptr;
+ tessellation_ci.flags = 0;
+ tessellation_ci.patchControlPoints = ts.patch_control_points;
+
+ VkPipelineViewportStateCreateInfo viewport_ci;
+ viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_ci.pNext = nullptr;
+ viewport_ci.flags = 0;
+ viewport_ci.viewportCount = Maxwell::NumViewports;
+ viewport_ci.pViewports = nullptr;
+ viewport_ci.scissorCount = Maxwell::NumViewports;
+ viewport_ci.pScissors = nullptr;
+
+ VkPipelineRasterizationStateCreateInfo rasterization_ci;
+ rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterization_ci.pNext = nullptr;
+ rasterization_ci.flags = 0;
+ rasterization_ci.depthClampEnable = rs.depth_clamp_enable;
+ rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
+ rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
+ rasterization_ci.cullMode =
+ rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE;
+ rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face);
+ rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
+ rasterization_ci.depthBiasConstantFactor = 0.0f;
+ rasterization_ci.depthBiasClamp = 0.0f;
+ rasterization_ci.depthBiasSlopeFactor = 0.0f;
+ rasterization_ci.lineWidth = 1.0f;
+
+ VkPipelineMultisampleStateCreateInfo multisample_ci;
+ multisample_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisample_ci.pNext = nullptr;
+ multisample_ci.flags = 0;
+ multisample_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+ multisample_ci.sampleShadingEnable = VK_FALSE;
+ multisample_ci.minSampleShading = 0.0f;
+ multisample_ci.pSampleMask = nullptr;
+ multisample_ci.alphaToCoverageEnable = VK_FALSE;
+ multisample_ci.alphaToOneEnable = VK_FALSE;
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil_ci;
+ depth_stencil_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depth_stencil_ci.pNext = nullptr;
+ depth_stencil_ci.flags = 0;
+ depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
+ depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
+ depth_stencil_ci.depthCompareOp = ds.depth_test_enable
+ ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
+ : VK_COMPARE_OP_ALWAYS;
+ depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
+ depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
+ depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil);
+ depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil);
+ depth_stencil_ci.minDepthBounds = 0.0f;
+ depth_stencil_ci.maxDepthBounds = 0.0f;
+
+ std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
const std::size_t num_attachments =
std::min(cd.attachments_count, renderpass_params.color_attachments.size());
for (std::size_t i = 0; i < num_attachments; ++i) {
- constexpr std::array component_table{
- vk::ColorComponentFlagBits::eR, vk::ColorComponentFlagBits::eG,
- vk::ColorComponentFlagBits::eB, vk::ColorComponentFlagBits::eA};
+ static constexpr std::array component_table = {
+ VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
+ VK_COLOR_COMPONENT_A_BIT};
const auto& blend = cd.attachments[i];
- vk::ColorComponentFlags color_components{};
+ VkColorComponentFlags color_components = 0;
for (std::size_t j = 0; j < component_table.size(); ++j) {
- if (blend.components[j])
+ if (blend.components[j]) {
color_components |= component_table[j];
+ }
}
- cb_attachments[i] = vk::PipelineColorBlendAttachmentState(
- blend.enable, MaxwellToVK::BlendFactor(blend.src_rgb_func),
- MaxwellToVK::BlendFactor(blend.dst_rgb_func),
- MaxwellToVK::BlendEquation(blend.rgb_equation),
- MaxwellToVK::BlendFactor(blend.src_a_func), MaxwellToVK::BlendFactor(blend.dst_a_func),
- MaxwellToVK::BlendEquation(blend.a_equation), color_components);
+ VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i];
+ attachment.blendEnable = blend.enable;
+ attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func);
+ attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func);
+ attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation);
+ attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func);
+ attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func);
+ attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation);
+ attachment.colorWriteMask = color_components;
}
- const vk::PipelineColorBlendStateCreateInfo color_blending_ci({}, false, vk::LogicOp::eCopy,
- static_cast<u32>(num_attachments),
- cb_attachments.data(), {});
-
- constexpr std::array dynamic_states = {
- vk::DynamicState::eViewport, vk::DynamicState::eScissor,
- vk::DynamicState::eDepthBias, vk::DynamicState::eBlendConstants,
- vk::DynamicState::eDepthBounds, vk::DynamicState::eStencilCompareMask,
- vk::DynamicState::eStencilWriteMask, vk::DynamicState::eStencilReference};
- const vk::PipelineDynamicStateCreateInfo dynamic_state_ci(
- {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data());
-
- vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+
+ VkPipelineColorBlendStateCreateInfo color_blend_ci;
+ color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ color_blend_ci.pNext = nullptr;
+ color_blend_ci.flags = 0;
+ color_blend_ci.logicOpEnable = VK_FALSE;
+ color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
+ color_blend_ci.attachmentCount = static_cast<u32>(num_attachments);
+ color_blend_ci.pAttachments = cb_attachments.data();
+ std::memset(color_blend_ci.blendConstants, 0, sizeof(color_blend_ci.blendConstants));
+
+ static constexpr std::array dynamic_states = {
+ VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE};
+
+ VkPipelineDynamicStateCreateInfo dynamic_state_ci;
+ dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ dynamic_state_ci.pNext = nullptr;
+ dynamic_state_ci.flags = 0;
+ dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
+ dynamic_state_ci.pDynamicStates = dynamic_states.data();
+
+ VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+ subgroup_size_ci.sType =
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
+ subgroup_size_ci.pNext = nullptr;
subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
- std::vector<vk::PipelineShaderStageCreateInfo> shader_stages;
+ std::vector<VkPipelineShaderStageCreateInfo> shader_stages;
std::size_t module_index = 0;
for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
if (!program[stage]) {
continue;
}
- const auto stage_enum = static_cast<Tegra::Engines::ShaderType>(stage);
- const auto vk_stage = MaxwellToVK::ShaderStage(stage_enum);
- auto& stage_ci = shader_stages.emplace_back(vk::PipelineShaderStageCreateFlags{}, vk_stage,
- *modules[module_index++], "main", nullptr);
- if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(vk_stage)) {
+ VkPipelineShaderStageCreateInfo& stage_ci = shader_stages.emplace_back();
+ stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage_ci.pNext = nullptr;
+ stage_ci.flags = 0;
+ stage_ci.stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage));
+ stage_ci.module = *modules[module_index++];
+ stage_ci.pName = "main";
+ stage_ci.pSpecializationInfo = nullptr;
+
+ if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
stage_ci.pNext = &subgroup_size_ci;
}
}
- const vk::GraphicsPipelineCreateInfo create_info(
- {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input_ci,
- &input_assembly_ci, &tessellation_ci, &viewport_ci, &rasterizer_ci, &multisampling_ci,
- &depth_stencil_ci, &color_blending_ci, &dynamic_state_ci, *layout, renderpass, 0, {}, 0);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createGraphicsPipelineUnique(nullptr, create_info, nullptr, dld);
+ VkGraphicsPipelineCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.stageCount = static_cast<u32>(shader_stages.size());
+ ci.pStages = shader_stages.data();
+ ci.pVertexInputState = &vertex_input_ci;
+ ci.pInputAssemblyState = &input_assembly_ci;
+ ci.pTessellationState = &tessellation_ci;
+ ci.pViewportState = &viewport_ci;
+ ci.pRasterizationState = &rasterization_ci;
+ ci.pMultisampleState = &multisample_ci;
+ ci.pDepthStencilState = &depth_stencil_ci;
+ ci.pColorBlendState = &color_blend_ci;
+ ci.pDynamicState = &dynamic_state_ci;
+ ci.layout = *layout;
+ ci.renderPass = renderpass;
+ ci.subpass = 0;
+ ci.basePipelineHandle = nullptr;
+ ci.basePipelineIndex = 0;
+ return device.GetLogical().CreateGraphicsPipeline(ci);
}
} // namespace Vulkan
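
Several signatures above switch from const std::vector<...>& to vk::Span<...>. Judging only from how it is used in this diff (a .size() that feeds a u32 count without a cast, .data(), and construction from a pointer plus a count), the type is a small non-owning view along these lines. This is a hedged sketch inferred from the call sites, not the actual wrapper.h definition:

    template <typename T>
    class Span {
    public:
        constexpr Span() noexcept = default;

        // Pointer + count, as in vk::Span(barriers.data(), cursor).
        constexpr Span(const T* data_, std::size_t size_) noexcept : ptr{data_}, num{size_} {}

        // Implicit construction from a contiguous range, e.g. std::vector<T>,
        // which lets existing vectors bind to Span parameters unchanged.
        template <typename Range>
        constexpr Span(const Range& range) : ptr{std::data(range)}, num{std::size(range)} {}

        constexpr const T* data() const noexcept {
            return ptr;
        }

        constexpr u32 size() const noexcept {
            return static_cast<u32>(num);
        }

    private:
        const T* ptr = nullptr;
        std::size_t num = 0;
    };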
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 4f5e4ea2d..7aba70960 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -11,12 +11,12 @@
#include <vector>
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -39,52 +39,52 @@ public:
VKUpdateDescriptorQueue& update_descriptor_queue,
VKRenderPassCache& renderpass_cache,
const GraphicsPipelineCacheKey& key,
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+ vk::Span<VkDescriptorSetLayoutBinding> bindings,
const SPIRVProgram& program);
~VKGraphicsPipeline();
- vk::DescriptorSet CommitDescriptorSet();
+ VkDescriptorSet CommitDescriptorSet();
- vk::Pipeline GetHandle() const {
+ VkPipeline GetHandle() const {
return *pipeline;
}
- vk::PipelineLayout GetLayout() const {
+ VkPipelineLayout GetLayout() const {
return *layout;
}
- vk::RenderPass GetRenderPass() const {
+ VkRenderPass GetRenderPass() const {
return renderpass;
}
private:
- UniqueDescriptorSetLayout CreateDescriptorSetLayout(
- const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const;
+ vk::DescriptorSetLayout CreateDescriptorSetLayout(
+ vk::Span<VkDescriptorSetLayoutBinding> bindings) const;
- UniquePipelineLayout CreatePipelineLayout() const;
+ vk::PipelineLayout CreatePipelineLayout() const;
- UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate(
+ vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
const SPIRVProgram& program) const;
- std::vector<UniqueShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
+ std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
- UniquePipeline CreatePipeline(const RenderPassParams& renderpass_params,
- const SPIRVProgram& program) const;
+ vk::Pipeline CreatePipeline(const RenderPassParams& renderpass_params,
+ const SPIRVProgram& program) const;
const VKDevice& device;
VKScheduler& scheduler;
const FixedPipelineState fixed_state;
const u64 hash;
- UniqueDescriptorSetLayout descriptor_set_layout;
+ vk::DescriptorSetLayout descriptor_set_layout;
DescriptorAllocator descriptor_allocator;
VKUpdateDescriptorQueue& update_descriptor_queue;
- UniquePipelineLayout layout;
- UniqueDescriptorUpdateTemplate descriptor_template;
- std::vector<UniqueShaderModule> modules;
+ vk::PipelineLayout layout;
+ vk::DescriptorUpdateTemplateKHR descriptor_template;
+ std::vector<vk::ShaderModule> modules;
- vk::RenderPass renderpass;
- UniquePipeline pipeline;
+ VkRenderPass renderpass;
+ vk::Pipeline pipeline;
};
} // namespace Vulkan
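
The getters above lean on three conventions of the new wrapper handles that can be read off this header and its .cpp: operator* yields the raw Vulkan handle, explicit operator bool tests for null (the descriptor_template and present_view checks), and address() returns a pointer for create-info arrays. A sketch of that shape for one handle type, with the details assumed rather than copied from wrapper.h:

    class Pipeline {
    public:
        // Dereference to the raw handle, as in return *pipeline; above.
        VkPipeline operator*() const noexcept {
            return handle;
        }

        // Null test, as in if (!descriptor_template).
        explicit operator bool() const noexcept {
            return handle != VK_NULL_HANDLE;
        }

        // Pointer access for structs that take handle arrays, as in
        // ci.pSetLayouts = descriptor_set_layout.address().
        const VkPipeline* address() const noexcept {
            return &handle;
        }

    private:
        VkPipeline handle = VK_NULL_HANDLE;
    };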
diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp
index 4bcbef959..9bceb3861 100644
--- a/src/video_core/renderer_vulkan/vk_image.cpp
+++ b/src/video_core/renderer_vulkan/vk_image.cpp
@@ -6,22 +6,21 @@
#include <vector>
#include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_image.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
-VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
- const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask)
+VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, const VkImageCreateInfo& image_ci,
+ VkImageAspectFlags aspect_mask)
: device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask},
image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} {
UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0,
"Queue family tracking is not implemented");
- const auto dev = device.GetLogical();
- image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader());
+ image = device.GetLogical().CreateImage(image_ci);
const u32 num_ranges = image_num_layers * image_num_levels;
barriers.resize(num_ranges);
@@ -31,8 +30,8 @@ VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
VKImage::~VKImage() = default;
void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
- vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
- vk::ImageLayout new_layout) {
+ VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+ VkImageLayout new_layout) {
if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) {
return;
}
@@ -43,9 +42,21 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
const u32 layer = base_layer + layer_it;
const u32 level = base_level + level_it;
auto& state = GetSubrangeState(layer, level);
- barriers[cursor] = vk::ImageMemoryBarrier(
- state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED,
- VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1});
+ auto& barrier = barriers[cursor];
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = state.access;
+ barrier.dstAccessMask = new_access;
+ barrier.oldLayout = state.layout;
+ barrier.newLayout = new_layout;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.image = *image;
+ barrier.subresourceRange.aspectMask = aspect_mask;
+ barrier.subresourceRange.baseMipLevel = level;
+ barrier.subresourceRange.levelCount = 1;
+ barrier.subresourceRange.baseArrayLayer = layer;
+ barrier.subresourceRange.layerCount = 1;
state.access = new_access;
state.layout = new_layout;
}
@@ -53,16 +64,16 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
scheduler.RequestOutsideRenderPassOperationContext();
- scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) {
+ scheduler.Record([barriers = barriers, cursor](vk::CommandBuffer cmdbuf) {
// TODO(Rodrigo): Implement a way to use the latest stage across subresources.
- constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands;
- cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr,
- static_cast<u32>(cursor), barriers.data(), dld);
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, {}, {},
+ vk::Span(barriers.data(), cursor));
});
}
bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
- vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept {
+ VkAccessFlags new_access, VkImageLayout new_layout) noexcept {
const bool is_full_range = base_layer == 0 && num_layers == image_num_layers &&
base_level == 0 && num_levels == image_num_levels;
if (!is_full_range) {
@@ -91,11 +102,21 @@ bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num
void VKImage::CreatePresentView() {
    // The image type has to be 2D to be presented.
- const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {},
- {aspect_mask, 0, 1, 0, 1});
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld);
+ VkImageViewCreateInfo image_view_ci;
+ image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_ci.pNext = nullptr;
+ image_view_ci.flags = 0;
+ image_view_ci.image = *image;
+ image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ image_view_ci.format = format;
+ image_view_ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
+ image_view_ci.subresourceRange.aspectMask = aspect_mask;
+ image_view_ci.subresourceRange.baseMipLevel = 0;
+ image_view_ci.subresourceRange.levelCount = 1;
+ image_view_ci.subresourceRange.baseArrayLayer = 0;
+ image_view_ci.subresourceRange.layerCount = 1;
+ present_view = device.GetLogical().CreateImageView(image_view_ci);
}
VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
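
The hunk above ends at the declaration of GetSubrangeState, whose body lies outside the diff context. A plausible implementation, inferred from the layer and level counts the class stores (an assumption for illustration, not code from this commit):

    VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
        // Assumed layer-major layout, matching the
        // num_ranges = image_num_layers * image_num_levels sizing earlier in this file.
        return subrange_states[layer * image_num_levels + level];
    }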
diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h
index b78242512..b4d7229e5 100644
--- a/src/video_core/renderer_vulkan/vk_image.h
+++ b/src/video_core/renderer_vulkan/vk_image.h
@@ -8,7 +8,7 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -18,16 +18,16 @@ class VKScheduler;
class VKImage {
public:
explicit VKImage(const VKDevice& device, VKScheduler& scheduler,
- const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask);
+ const VkImageCreateInfo& image_ci, VkImageAspectFlags aspect_mask);
~VKImage();
/// Records in the passed command buffer an image transition and updates the state of the image.
void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
- vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
- vk::ImageLayout new_layout);
+ VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+ VkImageLayout new_layout);
    /// Returns a view compatible with presentation; the image has to be 2D.
- vk::ImageView GetPresentView() {
+ VkImageView GetPresentView() {
if (!present_view) {
CreatePresentView();
}
@@ -35,28 +35,28 @@ public:
}
/// Returns the Vulkan image handler.
- vk::Image GetHandle() const {
- return *image;
+ const vk::Image& GetHandle() const {
+ return image;
}
/// Returns the Vulkan format for this image.
- vk::Format GetFormat() const {
+ VkFormat GetFormat() const {
return format;
}
/// Returns the Vulkan aspect mask.
- vk::ImageAspectFlags GetAspectMask() const {
+ VkImageAspectFlags GetAspectMask() const {
return aspect_mask;
}
private:
struct SubrangeState final {
- vk::AccessFlags access{}; ///< Current access bits.
- vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout.
+ VkAccessFlags access = 0; ///< Current access bits.
+ VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED; ///< Current image layout.
};
bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
- vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept;
+ VkAccessFlags new_access, VkImageLayout new_layout) noexcept;
/// Creates a presentation view.
void CreatePresentView();
@@ -67,16 +67,16 @@ private:
const VKDevice& device; ///< Device handler.
VKScheduler& scheduler; ///< Device scheduler.
- const vk::Format format; ///< Vulkan format.
- const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
- const u32 image_num_layers; ///< Number of layers.
- const u32 image_num_levels; ///< Number of mipmap levels.
+ const VkFormat format; ///< Vulkan format.
+ const VkImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
+ const u32 image_num_layers; ///< Number of layers.
+ const u32 image_num_levels; ///< Number of mipmap levels.
- UniqueImage image; ///< Image handle.
- UniqueImageView present_view; ///< Image view compatible with presentation.
+ vk::Image image; ///< Image handle.
+ vk::ImageView present_view; ///< Image view compatible with presentation.
- std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers.
- std::vector<SubrangeState> subrange_states; ///< Current subrange state.
+ std::vector<VkImageMemoryBarrier> barriers; ///< Pool of barriers.
+ std::vector<SubrangeState> subrange_states; ///< Current subrange state.
bool state_diverged = false; ///< True when subresources mismatch in layout.
};
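
For orientation, a caller drives Transition with the signature declared above; preparing an image as a transfer destination before a copy might look like this (a hypothetical call site, not taken from this commit; only the parameter order comes from the declaration):

    // Transition layer 0, level 0 of the image for a transfer write.
    image.Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_TRANSFER_BIT,
                     VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);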
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 9cc9979d0..6a9e658bf 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -11,9 +11,9 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -30,17 +30,11 @@ u64 GetAllocationChunkSize(u64 required_size) {
class VKMemoryAllocation final {
public:
explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
- vk::MemoryPropertyFlags properties, u64 allocation_size, u32 type)
- : device{device}, memory{memory}, properties{properties}, allocation_size{allocation_size},
- shifted_type{ShiftType(type)} {}
-
- ~VKMemoryAllocation() {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- dev.free(memory, nullptr, dld);
- }
+ VkMemoryPropertyFlags properties, u64 allocation_size, u32 type)
+ : device{device}, memory{std::move(memory)}, properties{properties},
+ allocation_size{allocation_size}, shifted_type{ShiftType(type)} {}
- VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
+ VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
auto found = TryFindFreeSection(free_iterator, allocation_size,
static_cast<u64>(commit_size), static_cast<u64>(alignment));
if (!found) {
@@ -73,9 +67,8 @@ public:
}
/// Returns whether this allocation is compatible with the arguments.
- bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const {
- return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) &&
- (type_mask & shifted_type) != 0;
+ bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
+ return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
}
private:
@@ -111,11 +104,11 @@ private:
return std::nullopt;
}
- const VKDevice& device; ///< Vulkan device.
- const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
- const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
- const u64 allocation_size; ///< Size of this allocation.
- const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
+ const VKDevice& device; ///< Vulkan device.
+ const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
+ const VkMemoryPropertyFlags properties; ///< Vulkan properties.
+ const u64 allocation_size; ///< Size of this allocation.
+ const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
/// Hints where the next free region is likely going to be.
u64 free_iterator{};
@@ -125,22 +118,20 @@ private:
};
VKMemoryManager::VKMemoryManager(const VKDevice& device)
- : device{device}, properties{device.GetPhysical().getMemoryProperties(
- device.GetDispatchLoader())},
+ : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
is_memory_unified{GetMemoryUnified(properties)} {}
VKMemoryManager::~VKMemoryManager() = default;
-VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirements,
+VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
bool host_visible) {
const u64 chunk_size = GetAllocationChunkSize(requirements.size);
    // When a host visible commit is requested, search for host visible and coherent memory;
    // otherwise search for a fast device local type.
- const vk::MemoryPropertyFlags wanted_properties =
- host_visible
- ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
- : vk::MemoryPropertyFlagBits::eDeviceLocal;
+ const VkMemoryPropertyFlags wanted_properties =
+ host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+ : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
return commit;
@@ -161,23 +152,19 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirement
return commit;
}
-VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- auto commit = Commit(dev.getBufferMemoryRequirements(buffer, dld), host_visible);
- dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
+VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
+ auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
+ buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
return commit;
}
-VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- auto commit = Commit(dev.getImageMemoryRequirements(image, dld), host_visible);
- dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
+VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
+ auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
+ image.BindMemory(commit->GetMemory(), commit->GetOffset());
return commit;
}
-bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
+bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
u64 size) {
const u32 type = [&] {
for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
@@ -191,24 +178,26 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
return 0U;
}();
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
-
// Try to allocate found type.
- const vk::MemoryAllocateInfo memory_ai(size, type);
- vk::DeviceMemory memory;
- if (const auto res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
- res != vk::Result::eSuccess) {
- LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
+ VkMemoryAllocateInfo memory_ai;
+ memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ memory_ai.pNext = nullptr;
+ memory_ai.allocationSize = size;
+ memory_ai.memoryTypeIndex = type;
+
+ vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
+ if (!memory) {
+ LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
return false;
}
- allocations.push_back(
- std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
+
+ allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
+ wanted_properties, size, type));
return true;
}
-VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& requirements,
- vk::MemoryPropertyFlags wanted_properties) {
+VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
+ VkMemoryPropertyFlags wanted_properties) {
for (auto& allocation : allocations) {
if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
continue;
@@ -220,10 +209,9 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
return {};
}
-/*static*/ bool VKMemoryManager::GetMemoryUnified(
- const vk::PhysicalDeviceMemoryProperties& properties) {
+bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
- if (!(properties.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+ if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
// Memory is considered unified when heaps are device local only.
return false;
}
@@ -232,23 +220,19 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
}
VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
- vk::DeviceMemory memory, u64 begin, u64 end)
- : device{device}, interval{begin, end}, memory{memory}, allocation{allocation} {}
+ const vk::DeviceMemory& memory, u64 begin, u64 end)
+ : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
VKMemoryCommitImpl::~VKMemoryCommitImpl() {
allocation->Free(this);
}
MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
- const auto dev = device.GetLogical();
- const auto address = reinterpret_cast<u8*>(
- dev.mapMemory(memory, interval.first + offset_, size, {}, device.GetDispatchLoader()));
- return MemoryMap{this, address};
+ return MemoryMap{this, memory.Map(interval.first + offset_, size)};
}
void VKMemoryCommitImpl::Unmap() const {
- const auto dev = device.GetLogical();
- dev.unmapMemory(memory, device.GetDispatchLoader());
+ memory.Unmap();
}
MemoryMap VKMemoryCommitImpl::Map() const {
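
The allocation-reuse test in IsCompatible above combines two bit masks. A short worked example, assuming ShiftType(type) is 1U << type (the helper's body is outside this diff):

    constexpr u32 type_mask = 0b0110;     // requirements.memoryTypeBits: types 1 and 2 accepted
    constexpr u32 shifted_type = 1U << 1; // allocation was created with memory type 1
    static_assert((type_mask & shifted_type) != 0,
                  "allocation can serve this commit, provided wanted_properties "
                  "also shares at least one bit with the stored properties");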
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index cd00bb91b..35ee54d30 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -8,7 +8,7 @@
#include <utility>
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -32,13 +32,13 @@ public:
* memory. When passing false, it will try to allocate device local memory.
* @returns A memory commit.
*/
- VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible);
+ VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible);
/// Commits memory required by the buffer and binds it.
- VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible);
+ VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
/// Commits memory required by the image and binds it.
- VKMemoryCommit Commit(vk::Image image, bool host_visible);
+ VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
    /// Returns true if memory allocations are always done in host visible and coherent memory.
bool IsMemoryUnified() const {
@@ -47,18 +47,18 @@ public:
private:
/// Allocates a chunk of memory.
- bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+ bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
/// Tries to allocate a memory commit.
- VKMemoryCommit TryAllocCommit(const vk::MemoryRequirements& requirements,
- vk::MemoryPropertyFlags wanted_properties);
+ VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
+ VkMemoryPropertyFlags wanted_properties);
    /// Returns true if the device uses a unified memory model.
- static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& properties);
+ static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
- const VKDevice& device; ///< Device handler.
- const vk::PhysicalDeviceMemoryProperties properties; ///< Physical device properties.
- const bool is_memory_unified; ///< True if memory model is unified.
+ const VKDevice& device; ///< Device handler.
+ const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
+ const bool is_memory_unified; ///< True if memory model is unified.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
@@ -68,7 +68,7 @@ class VKMemoryCommitImpl final {
public:
explicit VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
- vk::DeviceMemory memory, u64 begin, u64 end);
+ const vk::DeviceMemory& memory, u64 begin, u64 end);
~VKMemoryCommitImpl();
/// Maps a memory region and returns a pointer to it.
@@ -80,13 +80,13 @@ public:
MemoryMap Map() const;
/// Returns the Vulkan memory handler.
- vk::DeviceMemory GetMemory() const {
- return memory;
+ VkDeviceMemory GetMemory() const {
+ return *memory;
}
/// Returns the start position of the commit relative to the allocation.
- vk::DeviceSize GetOffset() const {
- return static_cast<vk::DeviceSize>(interval.first);
+ VkDeviceSize GetOffset() const {
+ return static_cast<VkDeviceSize>(interval.first);
}
private:
@@ -94,8 +94,8 @@ private:
void Unmap() const;
const VKDevice& device; ///< Vulkan device.
+ const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
- vk::DeviceMemory memory; ///< Vulkan device memory handler.
VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
};
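
Putting the header's Commit overloads together, the typical flow after this change is: create a resource through the device wrapper, commit and bind memory in one call, then map through the commit. A hedged sketch of such a call site; CreateBuffer, buffer_ci, and size are assumed names, and only the Commit/Map signatures come from this header:

    vk::Buffer buffer = device.GetLogical().CreateBuffer(buffer_ci);

    // Commit host-visible memory and bind it to the buffer in a single call.
    VKMemoryCommit commit = memory_manager.Commit(buffer, true);

    // Map `size` bytes at the start of the commit; the returned MemoryMap
    // presumably unmaps on destruction, since Unmap() is private above.
    MemoryMap map = commit->Map(size, 0);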
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 557b9d662..90e3a8edd 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -13,7 +13,6 @@
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
@@ -26,6 +25,7 @@
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/shader/compiler_settings.h"
namespace Vulkan {
@@ -36,12 +36,11 @@ using Tegra::Engines::ShaderType;
namespace {
-// C++20's using enum
-constexpr auto eUniformBuffer = vk::DescriptorType::eUniformBuffer;
-constexpr auto eStorageBuffer = vk::DescriptorType::eStorageBuffer;
-constexpr auto eUniformTexelBuffer = vk::DescriptorType::eUniformTexelBuffer;
-constexpr auto eCombinedImageSampler = vk::DescriptorType::eCombinedImageSampler;
-constexpr auto eStorageImage = vk::DescriptorType::eStorageImage;
+constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
VideoCommon::Shader::CompileDepth::FullDecompile};
@@ -126,43 +125,48 @@ ShaderType GetShaderType(Maxwell::ShaderProgram program) {
}
}
-template <vk::DescriptorType descriptor_type, class Container>
-void AddBindings(std::vector<vk::DescriptorSetLayoutBinding>& bindings, u32& binding,
- vk::ShaderStageFlags stage_flags, const Container& container) {
+template <VkDescriptorType descriptor_type, class Container>
+void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
+ VkShaderStageFlags stage_flags, const Container& container) {
const u32 num_entries = static_cast<u32>(std::size(container));
for (std::size_t i = 0; i < num_entries; ++i) {
u32 count = 1;
- if constexpr (descriptor_type == eCombinedImageSampler) {
+ if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
// Combined image samplers can be arrayed.
count = container[i].Size();
}
- bindings.emplace_back(binding++, descriptor_type, count, stage_flags, nullptr);
+ VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
+ entry.binding = binding++;
+ entry.descriptorType = descriptor_type;
+ entry.descriptorCount = count;
+ entry.stageFlags = stage_flags;
+ entry.pImmutableSamplers = nullptr;
}
}
u32 FillDescriptorLayout(const ShaderEntries& entries,
- std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+ std::vector<VkDescriptorSetLayoutBinding>& bindings,
Maxwell::ShaderProgram program_type, u32 base_binding) {
const ShaderType stage = GetStageFromProgram(program_type);
- const vk::ShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
+ const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
u32 binding = base_binding;
- AddBindings<eUniformBuffer>(bindings, binding, flags, entries.const_buffers);
- AddBindings<eStorageBuffer>(bindings, binding, flags, entries.global_buffers);
- AddBindings<eUniformTexelBuffer>(bindings, binding, flags, entries.texel_buffers);
- AddBindings<eCombinedImageSampler>(bindings, binding, flags, entries.samplers);
- AddBindings<eStorageImage>(bindings, binding, flags, entries.images);
+ AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
+ AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
+ AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers);
+ AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
+ AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
return binding;
}
} // Anonymous namespace
CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
- GPUVAddr gpu_addr, VAddr cpu_addr, u8* host_ptr,
- ProgramCode program_code, u32 main_offset)
- : RasterizerCacheObject{host_ptr}, gpu_addr{gpu_addr}, cpu_addr{cpu_addr},
- program_code{std::move(program_code)}, registry{stage, GetEngine(system, stage)},
- shader_ir{this->program_code, main_offset, compiler_settings, registry},
+ GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
+ u32 main_offset)
+ : RasterizerCacheObject{cpu_addr}, gpu_addr{gpu_addr}, program_code{std::move(program_code)},
+ registry{stage, GetEngine(system, stage)}, shader_ir{this->program_code, main_offset,
+ compiler_settings, registry},
entries{GenerateShaderEntries(shader_ir)} {}
CachedShader::~CachedShader() = default;
@@ -201,19 +205,19 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
auto& memory_manager{system.GPU().MemoryManager()};
const GPUVAddr program_addr{GetShaderAddress(system, program)};
- const auto host_ptr{memory_manager.GetPointer(program_addr)};
- auto shader = TryGet(host_ptr);
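+    // Shader lookups are now keyed by the emulated CPU address instead of the host pointer.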
+ const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+ ASSERT(cpu_addr);
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
if (!shader) {
+ const auto host_ptr{memory_manager.GetPointer(program_addr)};
+
// No shader found - create a new one
constexpr u32 stage_offset = 10;
const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
- const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
- ASSERT(cpu_addr);
-
shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
- host_ptr, std::move(code), stage_offset);
+ std::move(code), stage_offset);
Register(shader);
}
shaders[index] = std::move(shader);
@@ -253,18 +257,19 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
auto& memory_manager = system.GPU().MemoryManager();
const auto program_addr = key.shader;
- const auto host_ptr = memory_manager.GetPointer(program_addr);
- auto shader = TryGet(host_ptr);
+ const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
+ ASSERT(cpu_addr);
+
+ auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr;
if (!shader) {
// No shader found - create a new one
- const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
- ASSERT(cpu_addr);
+ const auto host_ptr = memory_manager.GetPointer(program_addr);
auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
constexpr u32 kernel_main_offset = 0;
shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
- program_addr, *cpu_addr, host_ptr, std::move(code),
+ program_addr, *cpu_addr, std::move(code),
kernel_main_offset);
Register(shader);
}
@@ -317,7 +322,7 @@ void VKPipelineCache::Unregister(const Shader& shader) {
RasterizerCache::Unregister(shader);
}
-std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>>
+std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
const auto& fixed_state = key.fixed_state;
auto& memory_manager = system.GPU().MemoryManager();
@@ -334,7 +339,7 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
SPIRVProgram program;
- std::vector<vk::DescriptorSetLayoutBinding> bindings;
+ std::vector<VkDescriptorSetLayoutBinding> bindings;
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
@@ -345,8 +350,9 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
}
const GPUVAddr gpu_addr = GetShaderAddress(system, program_enum);
- const auto host_ptr = memory_manager.GetPointer(gpu_addr);
- const auto shader = TryGet(host_ptr);
+ const auto cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
+ ASSERT(cpu_addr);
+ const auto shader = TryGet(*cpu_addr);
ASSERT(shader);
const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
@@ -369,32 +375,49 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
return {std::move(program), std::move(bindings)};
}
-template <vk::DescriptorType descriptor_type, class Container>
-void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries, u32& binding,
+template <VkDescriptorType descriptor_type, class Container>
+void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
u32& offset, const Container& container) {
static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
const u32 count = static_cast<u32>(std::size(container));
- if constexpr (descriptor_type == eCombinedImageSampler) {
+ if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
for (u32 i = 0; i < count; ++i) {
const u32 num_samplers = container[i].Size();
- template_entries.emplace_back(binding, 0, num_samplers, descriptor_type, offset,
- entry_size);
+ VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+ entry.dstBinding = binding;
+ entry.dstArrayElement = 0;
+ entry.descriptorCount = num_samplers;
+ entry.descriptorType = descriptor_type;
+ entry.offset = offset;
+ entry.stride = entry_size;
+
++binding;
offset += num_samplers * entry_size;
}
return;
}
- if constexpr (descriptor_type == eUniformTexelBuffer) {
+ if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) {
// Nvidia has a bug where updating multiple uniform texels at once causes the driver to
// crash.
for (u32 i = 0; i < count; ++i) {
- template_entries.emplace_back(binding + i, 0, 1, descriptor_type,
- offset + i * entry_size, entry_size);
+ VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+ entry.dstBinding = binding + i;
+ entry.dstArrayElement = 0;
+ entry.descriptorCount = 1;
+ entry.descriptorType = descriptor_type;
+ entry.offset = offset + i * entry_size;
+ entry.stride = entry_size;
}
} else if (count > 0) {
- template_entries.emplace_back(binding, 0, count, descriptor_type, offset, entry_size);
+ VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
+ entry.dstBinding = binding;
+ entry.dstArrayElement = 0;
+ entry.descriptorCount = count;
+ entry.descriptorType = descriptor_type;
+ entry.offset = offset;
+ entry.stride = entry_size;
}
offset += count * entry_size;
binding += count;
@@ -402,12 +425,12 @@ void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries,
void FillDescriptorUpdateTemplateEntries(
const ShaderEntries& entries, u32& binding, u32& offset,
- std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries) {
- AddEntry<eUniformBuffer>(template_entries, offset, binding, entries.const_buffers);
- AddEntry<eStorageBuffer>(template_entries, offset, binding, entries.global_buffers);
- AddEntry<eUniformTexelBuffer>(template_entries, offset, binding, entries.texel_buffers);
- AddEntry<eCombinedImageSampler>(template_entries, offset, binding, entries.samplers);
- AddEntry<eStorageImage>(template_entries, offset, binding, entries.images);
+ std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
+ AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
+ AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
+ AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers);
+ AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
+ AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
}
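// Sketch of the follow-on call that consumes these entries when the pipelines build their
// VK_KHR_descriptor_update_template; descriptor_set_layout and pipeline_layout are assumed
// to be in scope at the real call site, which is not part of this hunk:
//
//   VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
//   template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
//   template_ci.pNext = nullptr;
//   template_ci.flags = 0;
//   template_ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
//   template_ci.pDescriptorUpdateEntries = template_entries.data();
//   template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
//   template_ci.descriptorSetLayout = descriptor_set_layout;
//   template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//   template_ci.pipelineLayout = pipeline_layout;
//   template_ci.set = DESCRIPTOR_SET;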
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index c4c112290..7ccdb7083 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -19,12 +19,12 @@
#include "video_core/engines/const_buffer_engine_interface.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
#include "video_core/surface.h"
@@ -113,17 +113,13 @@ namespace Vulkan {
class CachedShader final : public RasterizerCacheObject {
public:
explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
- VAddr cpu_addr, u8* host_ptr, ProgramCode program_code, u32 main_offset);
+ VAddr cpu_addr, ProgramCode program_code, u32 main_offset);
~CachedShader();
GPUVAddr GetGpuAddr() const {
return gpu_addr;
}
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
std::size_t GetSizeInBytes() const override {
return program_code.size() * sizeof(u64);
}
@@ -149,7 +145,6 @@ private:
Tegra::Engines::ShaderType stage);
GPUVAddr gpu_addr{};
- VAddr cpu_addr{};
ProgramCode program_code;
VideoCommon::Shader::Registry registry;
VideoCommon::Shader::ShaderIR shader_ir;
@@ -177,7 +172,7 @@ protected:
void FlushObjectInner(const Shader& object) override {}
private:
- std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>> DecompileShaders(
+ std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
const GraphicsPipelineCacheKey& key);
Core::System& system;
@@ -199,6 +194,6 @@ private:
void FillDescriptorUpdateTemplateEntries(
const ShaderEntries& entries, u32& binding, u32& offset,
- std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries);
+ std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index ffbf60dda..0966c7ff7 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -8,19 +8,19 @@
#include <utility>
#include <vector>
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
namespace {
-constexpr std::array QUERY_TARGETS = {vk::QueryType::eOcclusion};
+constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};
-constexpr vk::QueryType GetTarget(VideoCore::QueryType type) {
+constexpr VkQueryType GetTarget(VideoCore::QueryType type) {
return QUERY_TARGETS[static_cast<std::size_t>(type)];
}
@@ -35,29 +35,34 @@ void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_)
type = type_;
}
-std::pair<vk::QueryPool, std::uint32_t> QueryPool::Commit(VKFence& fence) {
+std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
std::size_t index;
do {
index = CommitResource(fence);
} while (usage[index]);
usage[index] = true;
- return {*pools[index / GROW_STEP], static_cast<std::uint32_t>(index % GROW_STEP)};
+ return {*pools[index / GROW_STEP], static_cast<u32>(index % GROW_STEP)};
}
void QueryPool::Allocate(std::size_t begin, std::size_t end) {
usage.resize(end);
- const auto dev = device->GetLogical();
- const u32 size = static_cast<u32>(end - begin);
- const vk::QueryPoolCreateInfo query_pool_ci({}, GetTarget(type), size, {});
- pools.push_back(dev.createQueryPoolUnique(query_pool_ci, nullptr, device->GetDispatchLoader()));
+ VkQueryPoolCreateInfo query_pool_ci;
+ query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ query_pool_ci.pNext = nullptr;
+ query_pool_ci.flags = 0;
+ query_pool_ci.queryType = GetTarget(type);
+ query_pool_ci.queryCount = static_cast<u32>(end - begin);
+ query_pool_ci.pipelineStatistics = 0;
+ pools.push_back(device->GetLogical().CreateQueryPool(query_pool_ci));
}
-void QueryPool::Reserve(std::pair<vk::QueryPool, std::uint32_t> query) {
+void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
const auto it =
- std::find_if(std::begin(pools), std::end(pools),
- [query_pool = query.first](auto& pool) { return query_pool == *pool; });
+ std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
+ return query_pool == *pool;
+ });
ASSERT(it != std::end(pools));
const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
@@ -76,12 +81,11 @@ VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface&
VKQueryCache::~VKQueryCache() = default;
-std::pair<vk::QueryPool, std::uint32_t> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
+std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence());
}
-void VKQueryCache::Reserve(VideoCore::QueryType type,
- std::pair<vk::QueryPool, std::uint32_t> query) {
+void VKQueryCache::Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query) {
query_pools[static_cast<std::size_t>(type)].Reserve(query);
}
@@ -89,10 +93,10 @@ HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> depen
VideoCore::QueryType type)
: VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} {
- const auto dev = cache.Device().GetLogical();
- cache.Scheduler().Record([dev, query = query](vk::CommandBuffer cmdbuf, auto& dld) {
- dev.resetQueryPoolEXT(query.first, query.second, 1, dld);
- cmdbuf.beginQuery(query.first, query.second, vk::QueryControlFlagBits::ePrecise, dld);
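+    // Capture the logical device by pointer; the VKDevice is assumed to outlive any work
+    // recorded on the scheduler.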
+ const vk::Device* logical = &cache.Device().GetLogical();
+ cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
+ logical->ResetQueryPoolEXT(query.first, query.second, 1);
+ cmdbuf.BeginQuery(query.first, query.second, VK_QUERY_CONTROL_PRECISE_BIT);
});
}
@@ -101,22 +105,16 @@ HostCounter::~HostCounter() {
}
void HostCounter::EndQuery() {
- cache.Scheduler().Record([query = query](auto cmdbuf, auto& dld) {
- cmdbuf.endQuery(query.first, query.second, dld);
- });
+ cache.Scheduler().Record(
+ [query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
}
u64 HostCounter::BlockingQuery() const {
if (ticks >= cache.Scheduler().Ticks()) {
cache.Scheduler().Flush();
}
-
- const auto dev = cache.Device().GetLogical();
- const auto& dld = cache.Device().GetDispatchLoader();
- u64 value;
- dev.getQueryPoolResults(query.first, query.second, 1, sizeof(value), &value, sizeof(value),
- vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait, dld);
- return value;
+ return cache.Device().GetLogical().GetQueryResult<u64>(
+ query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
}
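// GetQueryResult<u64> is presumably a typed convenience over the raw call the removed lines
// made explicit, roughly:
//
//   u64 value;
//   vkGetQueryPoolResults(device, query.first, query.second, 1, sizeof(value), &value,
//                         sizeof(value), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
//   return value;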
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index c3092ee96..b63784f4b 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -12,8 +12,8 @@
#include "common/common_types.h"
#include "video_core/query_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace VideoCore {
class RasterizerInterface;
@@ -36,9 +36,9 @@ public:
void Initialize(const VKDevice& device, VideoCore::QueryType type);
- std::pair<vk::QueryPool, std::uint32_t> Commit(VKFence& fence);
+ std::pair<VkQueryPool, u32> Commit(VKFence& fence);
- void Reserve(std::pair<vk::QueryPool, std::uint32_t> query);
+ void Reserve(std::pair<VkQueryPool, u32> query);
protected:
void Allocate(std::size_t begin, std::size_t end) override;
@@ -49,7 +49,7 @@ private:
const VKDevice* device = nullptr;
VideoCore::QueryType type = {};
- std::vector<UniqueQueryPool> pools;
+ std::vector<vk::QueryPool> pools;
std::vector<bool> usage;
};
@@ -61,9 +61,9 @@ public:
const VKDevice& device, VKScheduler& scheduler);
~VKQueryCache();
- std::pair<vk::QueryPool, std::uint32_t> AllocateQuery(VideoCore::QueryType type);
+ std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
- void Reserve(VideoCore::QueryType type, std::pair<vk::QueryPool, std::uint32_t> query);
+ void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);
const VKDevice& Device() const noexcept {
return device;
@@ -91,7 +91,7 @@ private:
VKQueryCache& cache;
const VideoCore::QueryType type;
- const std::pair<vk::QueryPool, std::uint32_t> query;
+ const std::pair<VkQueryPool, u32> query;
const u64 ticks;
};
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 58c69b786..774ba1f26 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -19,7 +19,6 @@
#include "core/memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/renderer_vulkan.h"
@@ -39,6 +38,7 @@
#include "video_core/renderer_vulkan/vk_state_tracker.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -60,32 +60,42 @@ namespace {
constexpr auto ComputeShaderIndex = static_cast<std::size_t>(Tegra::Engines::ShaderType::Compute);
-vk::Viewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) {
- const auto& viewport = regs.viewport_transform[index];
- const float x = viewport.translate_x - viewport.scale_x;
- const float y = viewport.translate_y - viewport.scale_y;
- const float width = viewport.scale_x * 2.0f;
- const float height = viewport.scale_y * 2.0f;
+VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) {
+ const auto& src = regs.viewport_transform[index];
+ const float width = src.scale_x * 2.0f;
+ const float height = src.scale_y * 2.0f;
- const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
- float near = viewport.translate_z - viewport.scale_z * reduce_z;
- float far = viewport.translate_z + viewport.scale_z;
+ VkViewport viewport;
+ viewport.x = src.translate_x - src.scale_x;
+ viewport.y = src.translate_y - src.scale_y;
+ viewport.width = width != 0.0f ? width : 1.0f;
+ viewport.height = height != 0.0f ? height : 1.0f;
+
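+    // When the guest renders with a [-1, 1] depth range, pull the near plane back so it
+    // maps onto Vulkan's fixed [0, 1] range.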
+ const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1.0f : 0.0f;
+ viewport.minDepth = src.translate_z - src.scale_z * reduce_z;
+ viewport.maxDepth = src.translate_z + src.scale_z;
if (!device.IsExtDepthRangeUnrestrictedSupported()) {
- near = std::clamp(near, 0.0f, 1.0f);
- far = std::clamp(far, 0.0f, 1.0f);
+ viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
+ viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
}
-
- return vk::Viewport(x, y, width != 0 ? width : 1.0f, height != 0 ? height : 1.0f, near, far);
+ return viewport;
}
-constexpr vk::Rect2D GetScissorState(const Maxwell& regs, std::size_t index) {
- const auto& scissor = regs.scissor_test[index];
- if (!scissor.enable) {
- return {{0, 0}, {INT32_MAX, INT32_MAX}};
+VkRect2D GetScissorState(const Maxwell& regs, std::size_t index) {
+ const auto& src = regs.scissor_test[index];
+ VkRect2D scissor;
+ if (src.enable) {
+ scissor.offset.x = static_cast<s32>(src.min_x);
+ scissor.offset.y = static_cast<s32>(src.min_y);
+ scissor.extent.width = src.max_x - src.min_x;
+ scissor.extent.height = src.max_y - src.min_y;
+ } else {
+ scissor.offset.x = 0;
+ scissor.offset.y = 0;
+ scissor.extent.width = std::numeric_limits<s32>::max();
+ scissor.extent.height = std::numeric_limits<s32>::max();
}
- const u32 width = scissor.max_x - scissor.min_x;
- const u32 height = scissor.max_y - scissor.min_y;
- return {{static_cast<s32>(scissor.min_x), static_cast<s32>(scissor.min_y)}, {width, height}};
+ return scissor;
}
std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
@@ -97,8 +107,8 @@ std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
return addresses;
}
-void TransitionImages(const std::vector<ImageView>& views, vk::PipelineStageFlags pipeline_stage,
- vk::AccessFlags access) {
+void TransitionImages(const std::vector<ImageView>& views, VkPipelineStageFlags pipeline_stage,
+ VkAccessFlags access) {
for (auto& [view, layout] : views) {
view->Transition(*layout, pipeline_stage, access);
}
@@ -127,13 +137,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
class BufferBindings final {
public:
- void AddVertexBinding(const vk::Buffer* buffer, vk::DeviceSize offset) {
+ void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
vertex.buffer_ptrs[vertex.num_buffers] = buffer;
vertex.offsets[vertex.num_buffers] = offset;
++vertex.num_buffers;
}
- void SetIndexBinding(const vk::Buffer* buffer, vk::DeviceSize offset, vk::IndexType type) {
+ void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
index.buffer = buffer;
index.offset = offset;
index.type = type;
@@ -217,14 +227,14 @@ private:
// Some of these fields are intentionally left uninitialized to avoid initializing them twice.
struct {
std::size_t num_buffers = 0;
- std::array<const vk::Buffer*, Maxwell::NumVertexArrays> buffer_ptrs;
- std::array<vk::DeviceSize, Maxwell::NumVertexArrays> offsets;
+ std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
+ std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
} vertex;
struct {
- const vk::Buffer* buffer = nullptr;
- vk::DeviceSize offset;
- vk::IndexType type;
+ const VkBuffer* buffer = nullptr;
+ VkDeviceSize offset;
+ VkIndexType type;
} index;
template <std::size_t N>
@@ -243,38 +253,35 @@ private:
return;
}
- std::array<vk::Buffer, N> buffers;
+ std::array<VkBuffer, N> buffers;
std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
[](const auto ptr) { return *ptr; });
- std::array<vk::DeviceSize, N> offsets;
+ std::array<VkDeviceSize, N> offsets;
std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());
if constexpr (is_indexed) {
// Indexed draw
scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
index_offset = index.offset,
- index_type = index.type](auto cmdbuf, auto& dld) {
- cmdbuf.bindIndexBuffer(index_buffer, index_offset, index_type, dld);
- cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(),
- dld);
+ index_type = index.type](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
+ cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
});
} else {
// Array draw
- scheduler.Record([buffers, offsets](auto cmdbuf, auto& dld) {
- cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(),
- dld);
+ scheduler.Record([buffers, offsets](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
});
}
}
};
-void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf,
- const vk::DispatchLoaderDynamic& dld) const {
+void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
if (is_indexed) {
- cmdbuf.drawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance, dld);
+ cmdbuf.DrawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance);
} else {
- cmdbuf.draw(num_vertices, num_instances, base_vertex, base_instance, dld);
+ cmdbuf.Draw(num_vertices, num_instances, base_vertex, base_instance);
}
}
@@ -337,7 +344,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
const auto renderpass = pipeline.GetRenderPass();
const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
- scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr});
+ scheduler.RequestRenderpass(renderpass, framebuffer, render_area);
UpdateDynamicStates();
@@ -345,19 +352,19 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
if (device.IsNvDeviceDiagnosticCheckpoints()) {
scheduler.Record(
- [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(&pipeline, dld); });
+ [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
}
BeginTransformFeedback();
const auto pipeline_layout = pipeline.GetLayout();
const auto descriptor_set = pipeline.CommitDescriptorSet();
- scheduler.Record([pipeline_layout, descriptor_set, draw_params](auto cmdbuf, auto& dld) {
+ scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
if (descriptor_set) {
- cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline_layout,
- DESCRIPTOR_SET, 1, &descriptor_set, 0, nullptr, dld);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
+ DESCRIPTOR_SET, descriptor_set, {});
}
- draw_params.Draw(cmdbuf, dld);
+ draw_params.Draw(cmdbuf);
});
EndTransformFeedback();
@@ -389,48 +396,54 @@ void RasterizerVulkan::Clear() {
DEBUG_ASSERT(texceptions.none());
SetupImageTransitions(0, color_attachments, zeta_attachment);
- const vk::RenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0));
+ const VkRenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0));
const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
- scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr});
-
- const auto& scissor = regs.scissor_test[0];
- const vk::Offset2D scissor_offset(scissor.min_x, scissor.min_y);
- vk::Extent2D scissor_extent{scissor.max_x - scissor.min_x, scissor.max_y - scissor.min_y};
- scissor_extent.width = std::min(scissor_extent.width, render_area.width);
- scissor_extent.height = std::min(scissor_extent.height, render_area.height);
+ scheduler.RequestRenderpass(renderpass, framebuffer, render_area);
- const u32 layer = regs.clear_buffers.layer;
- const vk::ClearRect clear_rect({scissor_offset, scissor_extent}, layer, 1);
+ VkClearRect clear_rect;
+ clear_rect.baseArrayLayer = regs.clear_buffers.layer;
+ clear_rect.layerCount = 1;
+ clear_rect.rect = GetScissorState(regs, 0);
+ clear_rect.rect.extent.width = std::min(clear_rect.rect.extent.width, render_area.width);
+ clear_rect.rect.extent.height = std::min(clear_rect.rect.extent.height, render_area.height);
if (use_color) {
- const std::array clear_color = {regs.clear_color[0], regs.clear_color[1],
- regs.clear_color[2], regs.clear_color[3]};
- const vk::ClearValue clear_value{clear_color};
+ VkClearValue clear_value;
+ std::memcpy(clear_value.color.float32, regs.clear_color, sizeof(regs.clear_color));
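+        // regs.clear_color is an array of four floats, matching VkClearColorValue::float32.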
+
const u32 color_attachment = regs.clear_buffers.RT;
- scheduler.Record([color_attachment, clear_value, clear_rect](auto cmdbuf, auto& dld) {
- const vk::ClearAttachment attachment(vk::ImageAspectFlagBits::eColor, color_attachment,
- clear_value);
- cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld);
+ scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachment.colorAttachment = color_attachment;
+ attachment.clearValue = clear_value;
+ cmdbuf.ClearAttachments(attachment, clear_rect);
});
}
if (!use_depth && !use_stencil) {
return;
}
- vk::ImageAspectFlags aspect_flags;
+ VkImageAspectFlags aspect_flags = 0;
if (use_depth) {
- aspect_flags |= vk::ImageAspectFlagBits::eDepth;
+ aspect_flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
}
if (use_stencil) {
- aspect_flags |= vk::ImageAspectFlagBits::eStencil;
+ aspect_flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil,
- clear_rect, aspect_flags](auto cmdbuf, auto& dld) {
- const vk::ClearDepthStencilValue clear_zeta(clear_depth, clear_stencil);
- const vk::ClearValue clear_value{clear_zeta};
- const vk::ClearAttachment attachment(aspect_flags, 0, clear_value);
- cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld);
+ clear_rect, aspect_flags](vk::CommandBuffer cmdbuf) {
+ VkClearAttachment attachment;
+ attachment.aspectMask = aspect_flags;
+ attachment.colorAttachment = 0;
+ attachment.clearValue.depthStencil.depth = clear_depth;
+ attachment.clearValue.depthStencil.stencil = clear_stencil;
+ cmdbuf.ClearAttachments(attachment, clear_rect);
});
}
@@ -463,24 +476,24 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
buffer_cache.Unmap();
- TransitionImages(sampled_views, vk::PipelineStageFlagBits::eComputeShader,
- vk::AccessFlagBits::eShaderRead);
- TransitionImages(image_views, vk::PipelineStageFlagBits::eComputeShader,
- vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+ TransitionImages(sampled_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_ACCESS_SHADER_READ_BIT);
+ TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
if (device.IsNvDeviceDiagnosticCheckpoints()) {
scheduler.Record(
- [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(nullptr, dld); });
+ [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
}
scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
layout = pipeline.GetLayout(),
- descriptor_set = pipeline.CommitDescriptorSet()](auto cmdbuf, auto& dld) {
- cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline_handle, dld);
- cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, DESCRIPTOR_SET, 1,
- &descriptor_set, 0, nullptr, dld);
- cmdbuf.dispatch(grid_x, grid_y, grid_z, dld);
+ descriptor_set = pipeline.CommitDescriptorSet()](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
+ cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, DESCRIPTOR_SET,
+ descriptor_set, {});
+ cmdbuf.Dispatch(grid_x, grid_y, grid_z);
});
}
@@ -495,20 +508,26 @@ void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
void RasterizerVulkan::FlushAll() {}
-void RasterizerVulkan::FlushRegion(CacheAddr addr, u64 size) {
+void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
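+    // Ignore null addresses and zero-sized ranges.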
+ if (addr == 0 || size == 0) {
+ return;
+ }
texture_cache.FlushRegion(addr, size);
buffer_cache.FlushRegion(addr, size);
query_cache.FlushRegion(addr, size);
}
-void RasterizerVulkan::InvalidateRegion(CacheAddr addr, u64 size) {
+void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
+ if (addr == 0 || size == 0) {
+ return;
+ }
texture_cache.InvalidateRegion(addr, size);
pipeline_cache.InvalidateRegion(addr, size);
buffer_cache.InvalidateRegion(addr, size);
query_cache.InvalidateRegion(addr, size);
}
-void RasterizerVulkan::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) {
FlushRegion(addr, size);
InvalidateRegion(addr, size);
}
@@ -540,8 +559,7 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
return false;
}
- const u8* host_ptr{system.Memory().GetPointer(framebuffer_addr)};
- const auto surface{texture_cache.TryFindFramebufferSurface(host_ptr)};
+ const auto surface{texture_cache.TryFindFramebufferSurface(framebuffer_addr)};
if (!surface) {
return false;
}
@@ -594,7 +612,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
Texceptions texceptions;
for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
if (update_rendertargets) {
- color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
+ color_attachments[rt] = texture_cache.GetColorBufferSurface(rt);
}
if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
texceptions[rt] = true;
@@ -602,7 +620,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
}
if (update_rendertargets) {
- zeta_attachment = texture_cache.GetDepthBufferSurface(true);
+ zeta_attachment = texture_cache.GetDepthBufferSurface();
}
if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
texceptions[ZETA_TEXCEPTION_INDEX] = true;
@@ -620,13 +638,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
continue;
}
overlap = true;
- *layout = vk::ImageLayout::eGeneral;
+ *layout = VK_IMAGE_LAYOUT_GENERAL;
}
return overlap;
}
-std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffers(
- vk::RenderPass renderpass) {
+std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
+ VkRenderPass renderpass) {
FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
@@ -653,15 +671,20 @@ std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffer
const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
auto& framebuffer = fbentry->second;
if (is_cache_miss) {
- const vk::FramebufferCreateInfo framebuffer_ci(
- {}, key.renderpass, static_cast<u32>(key.views.size()), key.views.data(), key.width,
- key.height, key.layers);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- framebuffer = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
- }
-
- return {*framebuffer, vk::Extent2D{key.width, key.height}};
+ VkFramebufferCreateInfo framebuffer_ci;
+ framebuffer_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ framebuffer_ci.pNext = nullptr;
+ framebuffer_ci.flags = 0;
+ framebuffer_ci.renderPass = key.renderpass;
+ framebuffer_ci.attachmentCount = static_cast<u32>(key.views.size());
+ framebuffer_ci.pAttachments = key.views.data();
+ framebuffer_ci.width = key.width;
+ framebuffer_ci.height = key.height;
+ framebuffer_ci.layers = key.layers;
+ framebuffer = device.GetLogical().CreateFramebuffer(framebuffer_ci);
+ }
+
+ return {*framebuffer, VkExtent2D{key.width, key.height}};
}
RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineState& fixed_state,
@@ -709,10 +732,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
void RasterizerVulkan::SetupImageTransitions(
Texceptions texceptions, const std::array<View, Maxwell::NumRenderTargets>& color_attachments,
const View& zeta_attachment) {
- TransitionImages(sampled_views, vk::PipelineStageFlagBits::eAllGraphics,
- vk::AccessFlagBits::eShaderRead);
- TransitionImages(image_views, vk::PipelineStageFlagBits::eAllGraphics,
- vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+ TransitionImages(sampled_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_ACCESS_SHADER_READ_BIT);
+ TransitionImages(image_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
for (std::size_t rt = 0; rt < std::size(color_attachments); ++rt) {
const auto color_attachment = color_attachments[rt];
@@ -720,19 +742,19 @@ void RasterizerVulkan::SetupImageTransitions(
continue;
}
const auto image_layout =
- texceptions[rt] ? vk::ImageLayout::eGeneral : vk::ImageLayout::eColorAttachmentOptimal;
- color_attachment->Transition(
- image_layout, vk::PipelineStageFlagBits::eColorAttachmentOutput,
- vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite);
+ texceptions[rt] ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ color_attachment->Transition(image_layout, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
}
if (zeta_attachment != nullptr) {
const auto image_layout = texceptions[ZETA_TEXCEPTION_INDEX]
- ? vk::ImageLayout::eGeneral
- : vk::ImageLayout::eDepthStencilAttachmentOptimal;
- zeta_attachment->Transition(image_layout, vk::PipelineStageFlagBits::eLateFragmentTests,
- vk::AccessFlagBits::eDepthStencilAttachmentRead |
- vk::AccessFlagBits::eDepthStencilAttachmentWrite);
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ zeta_attachment->Transition(image_layout, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
}
}
@@ -768,9 +790,9 @@ void RasterizerVulkan::BeginTransformFeedback() {
const std::size_t size = binding.buffer_size;
const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
- scheduler.Record([buffer = *buffer, offset = offset, size](auto cmdbuf, auto& dld) {
- cmdbuf.bindTransformFeedbackBuffersEXT(0, {buffer}, {offset}, {size}, dld);
- cmdbuf.beginTransformFeedbackEXT(0, {}, {}, dld);
+ scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
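+        // Passing no counter buffers makes transform feedback start writing at offset zero
+        // instead of resuming a previous capture.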
+ cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
});
}
@@ -781,7 +803,7 @@ void RasterizerVulkan::EndTransformFeedback() {
}
scheduler.Record(
- [](auto cmdbuf, auto& dld) { cmdbuf.endTransformFeedbackEXT(0, {}, {}, dld); });
+ [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
}
void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex_input,
@@ -832,7 +854,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
} else {
const auto [buffer, offset] =
quad_array_pass.Assemble(params.num_vertices, params.base_vertex);
- buffer_bindings.SetIndexBinding(&buffer, offset, vk::IndexType::eUint32);
+ buffer_bindings.SetIndexBinding(buffer, offset, VK_INDEX_TYPE_UINT32);
params.base_vertex = 0;
params.num_vertices = params.num_vertices * 6 / 4;
params.is_indexed = true;
@@ -1017,7 +1039,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
update_descriptor_queue.AddSampledImage(sampler, image_view);
const auto image_layout = update_descriptor_queue.GetLastImageLayout();
- *image_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
+ *image_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
sampled_views.push_back(ImageView{std::move(view), image_layout});
}
@@ -1034,7 +1056,7 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima
update_descriptor_queue.AddImage(image_view);
const auto image_layout = update_descriptor_queue.GetLastImageLayout();
- *image_layout = vk::ImageLayout::eGeneral;
+ *image_layout = VK_IMAGE_LAYOUT_GENERAL;
image_views.push_back(ImageView{std::move(view), image_layout});
}
@@ -1051,9 +1073,7 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg
GetViewportState(device, regs, 10), GetViewportState(device, regs, 11),
GetViewportState(device, regs, 12), GetViewportState(device, regs, 13),
GetViewportState(device, regs, 14), GetViewportState(device, regs, 15)};
- scheduler.Record([viewports](auto cmdbuf, auto& dld) {
- cmdbuf.setViewport(0, static_cast<u32>(viewports.size()), viewports.data(), dld);
- });
+ scheduler.Record([viewports](vk::CommandBuffer cmdbuf) { cmdbuf.SetViewport(0, viewports); });
}
void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1067,9 +1087,7 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
GetScissorState(regs, 9), GetScissorState(regs, 10), GetScissorState(regs, 11),
GetScissorState(regs, 12), GetScissorState(regs, 13), GetScissorState(regs, 14),
GetScissorState(regs, 15)};
- scheduler.Record([scissors](auto cmdbuf, auto& dld) {
- cmdbuf.setScissor(0, static_cast<u32>(scissors.size()), scissors.data(), dld);
- });
+ scheduler.Record([scissors](vk::CommandBuffer cmdbuf) { cmdbuf.SetScissor(0, scissors); });
}
void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1077,8 +1095,8 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
return;
}
scheduler.Record([constant = regs.polygon_offset_units, clamp = regs.polygon_offset_clamp,
- factor = regs.polygon_offset_factor](auto cmdbuf, auto& dld) {
- cmdbuf.setDepthBias(constant, clamp, factor / 2.0f, dld);
+ factor = regs.polygon_offset_factor](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetDepthBias(constant, clamp, factor / 2.0f);
});
}
@@ -1088,9 +1106,8 @@ void RasterizerVulkan::UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& reg
}
const std::array blend_color = {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b,
regs.blend_color.a};
- scheduler.Record([blend_color](auto cmdbuf, auto& dld) {
- cmdbuf.setBlendConstants(blend_color.data(), dld);
- });
+ scheduler.Record(
+ [blend_color](vk::CommandBuffer cmdbuf) { cmdbuf.SetBlendConstants(blend_color.data()); });
}
void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1098,7 +1115,7 @@ void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs)
return;
}
scheduler.Record([min = regs.depth_bounds[0], max = regs.depth_bounds[1]](
- auto cmdbuf, auto& dld) { cmdbuf.setDepthBounds(min, max, dld); });
+ vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBounds(min, max); });
}
void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1111,24 +1128,24 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
[front_ref = regs.stencil_front_func_ref, front_write_mask = regs.stencil_front_mask,
front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_func_ref,
back_write_mask = regs.stencil_back_mask,
- back_test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
+ back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
// Front face
- cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFront, front_ref, dld);
- cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFront, front_write_mask, dld);
- cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFront, front_test_mask, dld);
+ cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_BIT, front_ref);
+ cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_BIT, front_write_mask);
+ cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_BIT, front_test_mask);
// Back face
- cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eBack, back_ref, dld);
- cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eBack, back_write_mask, dld);
- cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eBack, back_test_mask, dld);
+ cmdbuf.SetStencilReference(VK_STENCIL_FACE_BACK_BIT, back_ref);
+ cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_BACK_BIT, back_write_mask);
+ cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_BACK_BIT, back_test_mask);
});
} else {
// Front face defines both faces
scheduler.Record([ref = regs.stencil_back_func_ref, write_mask = regs.stencil_back_mask,
- test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
- cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFrontAndBack, ref, dld);
- cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFrontAndBack, write_mask, dld);
- cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFrontAndBack, test_mask, dld);
+ test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
+ cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
+ cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
+ cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_AND_BACK, test_mask);
});
}
}
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 3185868e9..46037860a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -17,7 +17,6 @@
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_accelerated.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
@@ -32,6 +31,7 @@
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Core {
class System;
@@ -49,11 +49,10 @@ namespace Vulkan {
struct VKScreenInfo;
-using ImageViewsPack =
- boost::container::static_vector<vk::ImageView, Maxwell::NumRenderTargets + 1>;
+using ImageViewsPack = boost::container::static_vector<VkImageView, Maxwell::NumRenderTargets + 1>;
struct FramebufferCacheKey {
- vk::RenderPass renderpass{};
+ VkRenderPass renderpass{};
u32 width = 0;
u32 height = 0;
u32 layers = 0;
@@ -101,7 +100,7 @@ class BufferBindings;
struct ImageView {
View view;
- vk::ImageLayout* layout = nullptr;
+ VkImageLayout* layout = nullptr;
};
class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
@@ -118,9 +117,9 @@ public:
void ResetCounter(VideoCore::QueryType type) override;
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
void FlushAll() override;
- void FlushRegion(CacheAddr addr, u64 size) override;
- void InvalidateRegion(CacheAddr addr, u64 size) override;
- void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushRegion(VAddr addr, u64 size) override;
+ void InvalidateRegion(VAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -137,7 +136,7 @@ public:
private:
struct DrawParameters {
- void Draw(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld) const;
+ void Draw(vk::CommandBuffer cmdbuf) const;
u32 base_instance = 0;
u32 num_instances = 0;
@@ -154,7 +153,7 @@ private:
Texceptions UpdateAttachments();
- std::tuple<vk::Framebuffer, vk::Extent2D> ConfigureFramebuffers(vk::RenderPass renderpass);
+ std::tuple<VkFramebuffer, VkExtent2D> ConfigureFramebuffers(VkRenderPass renderpass);
/// Setups geometry buffers and state.
DrawParameters SetupGeometry(FixedPipelineState& fixed_state, BufferBindings& buffer_bindings,
@@ -272,7 +271,7 @@ private:
u32 draw_counter = 0;
// TODO(Rodrigo): Invalidate on image destruction
- std::unordered_map<FramebufferCacheKey, UniqueFramebuffer> framebuffer_cache;
+ std::unordered_map<FramebufferCacheKey, vk::Framebuffer> framebuffer_cache;
};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 93f5d7ba0..4e5286a69 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -6,10 +6,10 @@
#include <vector>
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -17,7 +17,7 @@ VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
VKRenderPassCache::~VKRenderPassCache() = default;
-vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
+VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
const auto [pair, is_cache_miss] = cache.try_emplace(params);
auto& entry = pair->second;
if (is_cache_miss) {
@@ -26,9 +26,9 @@ vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params)
return *entry;
}
-UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
- std::vector<vk::AttachmentDescription> descriptors;
- std::vector<vk::AttachmentReference> color_references;
+vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+ std::vector<VkAttachmentDescription> descriptors;
+ std::vector<VkAttachmentReference> color_references;
for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
const auto attachment = params.color_attachments[rt];
@@ -39,16 +39,25 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
// TODO(Rodrigo): Add eMayAlias when it's needed.
const auto color_layout = attachment.is_texception
- ? vk::ImageLayout::eGeneral
- : vk::ImageLayout::eColorAttachmentOptimal;
- descriptors.emplace_back(vk::AttachmentDescriptionFlagBits::eMayAlias, format.format,
- vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
- vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare,
- vk::AttachmentStoreOp::eDontCare, color_layout, color_layout);
- color_references.emplace_back(static_cast<u32>(rt), color_layout);
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ VkAttachmentDescription& descriptor = descriptors.emplace_back();
+ descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
+ descriptor.format = format.format;
+ descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+ descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ descriptor.initialLayout = color_layout;
+ descriptor.finalLayout = color_layout;
+
+ VkAttachmentReference& reference = color_references.emplace_back();
+ reference.attachment = static_cast<u32>(rt);
+ reference.layout = color_layout;
}
- vk::AttachmentReference zeta_attachment_ref;
+ VkAttachmentReference zeta_attachment_ref;
if (params.has_zeta) {
const auto format =
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
@@ -56,45 +65,68 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
static_cast<u32>(params.zeta_pixel_format));
const auto zeta_layout = params.zeta_texception
- ? vk::ImageLayout::eGeneral
- : vk::ImageLayout::eDepthStencilAttachmentOptimal;
- descriptors.emplace_back(vk::AttachmentDescriptionFlags{}, format.format,
- vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
- vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eLoad,
- vk::AttachmentStoreOp::eStore, zeta_layout, zeta_layout);
- zeta_attachment_ref =
- vk::AttachmentReference(static_cast<u32>(params.color_attachments.size()), zeta_layout);
+ ? VK_IMAGE_LAYOUT_GENERAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ VkAttachmentDescription& descriptor = descriptors.emplace_back();
+ descriptor.flags = 0;
+ descriptor.format = format.format;
+ descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+ descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ descriptor.initialLayout = zeta_layout;
+ descriptor.finalLayout = zeta_layout;
+
+ zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+ zeta_attachment_ref.layout = zeta_layout;
}
- const vk::SubpassDescription subpass_description(
- {}, vk::PipelineBindPoint::eGraphics, 0, nullptr, static_cast<u32>(color_references.size()),
- color_references.data(), nullptr, params.has_zeta ? &zeta_attachment_ref : nullptr, 0,
- nullptr);
-
- vk::AccessFlags access;
- vk::PipelineStageFlags stage;
+ VkSubpassDescription subpass_description;
+ subpass_description.flags = 0;
+ subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass_description.inputAttachmentCount = 0;
+ subpass_description.pInputAttachments = nullptr;
+ subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
+ subpass_description.pColorAttachments = color_references.data();
+ subpass_description.pResolveAttachments = nullptr;
+ subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+ subpass_description.preserveAttachmentCount = 0;
+ subpass_description.pPreserveAttachments = nullptr;
+
+ VkAccessFlags access = 0;
+ VkPipelineStageFlags stage = 0;
if (!color_references.empty()) {
- access |=
- vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite;
- stage |= vk::PipelineStageFlagBits::eColorAttachmentOutput;
+ access |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
if (params.has_zeta) {
- access |= vk::AccessFlagBits::eDepthStencilAttachmentRead |
- vk::AccessFlagBits::eDepthStencilAttachmentWrite;
- stage |= vk::PipelineStageFlagBits::eLateFragmentTests;
+ access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
}
- const vk::SubpassDependency subpass_dependency(VK_SUBPASS_EXTERNAL, 0, stage, stage, {}, access,
- {});
-
- const vk::RenderPassCreateInfo create_info({}, static_cast<u32>(descriptors.size()),
- descriptors.data(), 1, &subpass_description, 1,
- &subpass_dependency);
-
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createRenderPassUnique(create_info, nullptr, dld);
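+    // A single external dependency orders subpass 0 after prior work that touched the same
+    // attachment stages; only the destination access masks need to be made visible here.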
+ VkSubpassDependency subpass_dependency;
+ subpass_dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+ subpass_dependency.dstSubpass = 0;
+ subpass_dependency.srcStageMask = stage;
+ subpass_dependency.dstStageMask = stage;
+ subpass_dependency.srcAccessMask = 0;
+ subpass_dependency.dstAccessMask = access;
+ subpass_dependency.dependencyFlags = 0;
+
+ VkRenderPassCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.attachmentCount = static_cast<u32>(descriptors.size());
+ ci.pAttachments = descriptors.data();
+ ci.subpassCount = 1;
+ ci.pSubpasses = &subpass_description;
+ ci.dependencyCount = 1;
+ ci.pDependencies = &subpass_dependency;
+ return device.GetLogical().CreateRenderPass(ci);
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index b49b2db48..921b6efb5 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -12,7 +12,7 @@
#include <boost/functional/hash.hpp>
#include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
namespace Vulkan {
@@ -85,13 +85,13 @@ public:
explicit VKRenderPassCache(const VKDevice& device);
~VKRenderPassCache();
- vk::RenderPass GetRenderPass(const RenderPassParams& params);
+ VkRenderPass GetRenderPass(const RenderPassParams& params);
private:
- UniqueRenderPass CreateRenderPass(const RenderPassParams& params) const;
+ vk::RenderPass CreateRenderPass(const RenderPassParams& params) const;
const VKDevice& device;
- std::unordered_map<RenderPassParams, UniqueRenderPass> cache;
+ std::unordered_map<RenderPassParams, vk::RenderPass> cache;
};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index 525b4bb46..dc06f545a 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -6,83 +6,83 @@
#include <optional>
#include "common/assert.h"
#include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
+namespace {
+
// TODO(Rodrigo): Fine-tune these numbers.
constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
constexpr std::size_t FENCES_GROW_STEP = 0x40;
+VkFenceCreateInfo BuildFenceCreateInfo() {
+ VkFenceCreateInfo fence_ci;
+ fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ fence_ci.pNext = nullptr;
+ fence_ci.flags = 0;
+ return fence_ci;
+}
+
+} // Anonymous namespace
+
class CommandBufferPool final : public VKFencedPool {
public:
CommandBufferPool(const VKDevice& device)
: VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
void Allocate(std::size_t begin, std::size_t end) override {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- const u32 graphics_family = device.GetGraphicsFamily();
-
- auto pool = std::make_unique<Pool>();
-
// Command buffers are going to be committed, recorded, and executed every single usage cycle.
// They are also going to be reset when committed.
- const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
- vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
- const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
- pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
-
- const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
- vk::CommandBufferLevel::ePrimary,
- static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
- pool->cmdbufs =
- dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
-
- pools.push_back(std::move(pool));
+ VkCommandPoolCreateInfo command_pool_ci;
+ command_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ command_pool_ci.pNext = nullptr;
+ command_pool_ci.flags =
+ VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ command_pool_ci.queueFamilyIndex = device.GetGraphicsFamily();
+
+ Pool& pool = pools.emplace_back();
+ pool.handle = device.GetLogical().CreateCommandPool(command_pool_ci);
+ pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
}
- vk::CommandBuffer Commit(VKFence& fence) {
+ VkCommandBuffer Commit(VKFence& fence) {
const std::size_t index = CommitResource(fence);
const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
- return *pools[pool_index]->cmdbufs[sub_index];
+ return pools[pool_index].cmdbufs[sub_index];
}
private:
struct Pool {
- UniqueCommandPool handle;
- std::vector<UniqueCommandBuffer> cmdbufs;
+ vk::CommandPool handle;
+ vk::CommandBuffers cmdbufs;
};
const VKDevice& device;
-
- std::vector<std::unique_ptr<Pool>> pools;
+ std::vector<Pool> pools;
};
VKResource::VKResource() = default;
VKResource::~VKResource() = default;
-VKFence::VKFence(const VKDevice& device, UniqueFence handle)
- : device{device}, handle{std::move(handle)} {}
+VKFence::VKFence(const VKDevice& device)
+ : device{device}, handle{device.GetLogical().CreateFence(BuildFenceCreateInfo())} {}
VKFence::~VKFence() = default;
void VKFence::Wait() {
- static constexpr u64 timeout = std::numeric_limits<u64>::max();
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) {
- case vk::Result::eSuccess:
+ switch (const VkResult result = handle.Wait()) {
+ case VK_SUCCESS:
return;
- case vk::Result::eErrorDeviceLost:
+ case VK_ERROR_DEVICE_LOST:
device.ReportLoss();
[[fallthrough]];
default:
- vk::throwResultException(result, "vk::waitForFences");
+ throw vk::Exception(result);
}
}
@@ -107,13 +107,11 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
return false;
}
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
if (gpu_wait) {
// Wait for the fence if it has been requested.
- dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+ (void)handle.Wait();
} else {
- if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
+ if (handle.GetStatus() != VK_SUCCESS) {
// Vulkan fence is not ready, not much it can do here
return false;
}
@@ -126,7 +124,7 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
protected_resources.clear();
// Prepare fence for reusage.
- dev.resetFences({*handle}, dld);
+ handle.Reset();
is_used = false;
return true;
}
@@ -299,21 +297,16 @@ VKFence& VKResourceManager::CommitFence() {
return *found_fence;
}
-vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
+VkCommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
return command_buffer_pool->Commit(fence);
}
void VKResourceManager::GrowFences(std::size_t new_fences_count) {
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- const vk::FenceCreateInfo fence_ci;
-
const std::size_t previous_size = fences.size();
fences.resize(previous_size + new_fences_count);
- std::generate(fences.begin() + previous_size, fences.end(), [&]() {
- return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
- });
+ std::generate(fences.begin() + previous_size, fences.end(),
+ [this] { return std::make_unique<VKFence>(device); });
}
} // namespace Vulkan
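The Wait()/GetStatus()/Reset() wrapper methods used above map onto three raw fence calls. A hedged standalone sketch of the Tick logic in terms of the C API (TickFence is an illustrative name, not the patch's method):

#include <cstdint>
#include <limits>
#include <vulkan/vulkan.h>

// Returns true when the fence is signaled (or after blocking for it) and has
// been reset for reuse; returns false when polling finds it still pending.
bool TickFence(VkDevice device, VkFence fence, bool gpu_wait) {
    if (gpu_wait) {
        // Block until the GPU signals the fence (infinite timeout).
        vkWaitForFences(device, 1, &fence, VK_TRUE, std::numeric_limits<uint64_t>::max());
    } else if (vkGetFenceStatus(device, fence) != VK_SUCCESS) {
        return false; // Fence not signaled yet; try again later.
    }
    // Prepare the fence for the next submission.
    vkResetFences(device, 1, &fence);
    return true;
}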
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index d4cbc95a5..f683d2276 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -7,7 +7,7 @@
#include <cstddef>
#include <memory>
#include <vector>
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -42,7 +42,7 @@ class VKFence {
friend class VKResourceManager;
public:
- explicit VKFence(const VKDevice& device, UniqueFence handle);
+ explicit VKFence(const VKDevice& device);
~VKFence();
/**
@@ -69,7 +69,7 @@ public:
void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
/// Retrieves the fence.
- operator vk::Fence() const {
+ operator VkFence() const {
return *handle;
}
@@ -87,7 +87,7 @@ private:
bool Tick(bool gpu_wait, bool owner_wait);
const VKDevice& device; ///< Device handler
- UniqueFence handle; ///< Vulkan fence
+ vk::Fence handle; ///< Vulkan fence
std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
bool is_owned = false; ///< The fence has been commited but not released yet.
bool is_used = false; ///< The fence has been commited but it has not been checked to be free.
@@ -181,7 +181,7 @@ public:
VKFence& CommitFence();
/// Commits an unused command buffer and protects it with a fence.
- vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
+ VkCommandBuffer CommitCommandBuffer(VKFence& fence);
private:
/// Allocates new fences.
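The pool arithmetic behind CommitCommandBuffer is worth spelling out: the fenced pool hands back a flat resource index, which is split into a pool number and an offset inside that pool. Illustrative only:

#include <cstddef>

constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;

// Which Pool in 'pools' owns the committed resource.
std::size_t PoolIndex(std::size_t index) {
    return index / COMMAND_BUFFER_POOL_SIZE;
}

// Which command buffer inside that Pool to hand out.
std::size_t SubIndex(std::size_t index) {
    return index % COMMAND_BUFFER_POOL_SIZE;
}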
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 204b7c39c..07bbcf520 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -7,64 +7,64 @@
#include <unordered_map>
#include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_sampler_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/textures/texture.h"
namespace Vulkan {
-static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) {
+namespace {
+
+VkBorderColor ConvertBorderColor(std::array<float, 4> color) {
// TODO(Rodrigo): Manage integer border colors
if (color == std::array<float, 4>{0, 0, 0, 0}) {
- return vk::BorderColor::eFloatTransparentBlack;
+ return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
} else if (color == std::array<float, 4>{0, 0, 0, 1}) {
- return vk::BorderColor::eFloatOpaqueBlack;
+ return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
} else if (color == std::array<float, 4>{1, 1, 1, 1}) {
- return vk::BorderColor::eFloatOpaqueWhite;
+ return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+ }
+ if (color[0] + color[1] + color[2] > 1.35f) {
+ // If color elements are brighter than roughly 0.5 average, use white border
+ return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+ } else if (color[3] > 0.5f) {
+ return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
} else {
- if (color[0] + color[1] + color[2] > 1.35f) {
- // If color elements are brighter than roughly 0.5 average, use white border
- return vk::BorderColor::eFloatOpaqueWhite;
- }
- if (color[3] > 0.5f) {
- return vk::BorderColor::eFloatOpaqueBlack;
- }
- return vk::BorderColor::eFloatTransparentBlack;
+ return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
}
}
+} // Anonymous namespace
+
VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {}
VKSamplerCache::~VKSamplerCache() = default;
-UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
- const float max_anisotropy{tsc.GetMaxAnisotropy()};
- const bool has_anisotropy{max_anisotropy > 1.0f};
-
- const auto border_color{tsc.GetBorderColor()};
- const auto vk_border_color{TryConvertBorderColor(border_color)};
-
- constexpr bool unnormalized_coords{false};
-
- const vk::SamplerCreateInfo sampler_ci(
- {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter),
- MaxwellToVK::Sampler::Filter(tsc.min_filter),
- MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
- MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter),
- MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter),
- MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter), tsc.GetLodBias(),
- has_anisotropy, max_anisotropy, tsc.depth_compare_enabled,
- MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(),
- tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack),
- unnormalized_coords);
-
- const auto& dld{device.GetDispatchLoader()};
- const auto dev{device.GetLogical()};
- return dev.createSamplerUnique(sampler_ci, nullptr, dld);
+vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
+ VkSamplerCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter);
+ ci.minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter);
+ ci.mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter);
+ ci.addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter);
+ ci.addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter);
+ ci.addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter);
+ ci.mipLodBias = tsc.GetLodBias();
+ ci.anisotropyEnable = tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE;
+ ci.maxAnisotropy = tsc.GetMaxAnisotropy();
+ ci.compareEnable = tsc.depth_compare_enabled;
+ ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func);
+ ci.minLod = tsc.GetMinLod();
+ ci.maxLod = tsc.GetMaxLod();
+ ci.borderColor = ConvertBorderColor(tsc.GetBorderColor());
+ ci.unnormalizedCoordinates = VK_FALSE;
+ return device.GetLogical().CreateSampler(ci);
}
-vk::Sampler VKSamplerCache::ToSamplerType(const UniqueSampler& sampler) const {
+VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const {
return *sampler;
}
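The fallback heuristic in ConvertBorderColor can be restated standalone. This sketch assumes the same 1.35 luminance-sum threshold as the patch; ApproximateBorderColor is an illustrative name:

#include <array>
#include <vulkan/vulkan.h>

// Picks the closest of the three float border colors Vulkan offers.
VkBorderColor ApproximateBorderColor(const std::array<float, 4>& c) {
    if (c[0] + c[1] + c[2] > 1.35f) {
        return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;  // bright -> white
    }
    if (c[3] > 0.5f) {
        return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;  // dark but opaque -> black
    }
    return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // dark and translucent
}

// e.g. {0.9f, 0.9f, 0.9f, 1.0f} maps to opaque white, while
//      {0.1f, 0.1f, 0.1f, 0.2f} maps to transparent black.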
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h
index 1f73b716b..a33d1c0ee 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.h
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h
@@ -4,7 +4,7 @@
#pragma once
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/sampler_cache.h"
#include "video_core/textures/texture.h"
@@ -12,15 +12,15 @@ namespace Vulkan {
class VKDevice;
-class VKSamplerCache final : public VideoCommon::SamplerCache<vk::Sampler, UniqueSampler> {
+class VKSamplerCache final : public VideoCommon::SamplerCache<VkSampler, vk::Sampler> {
public:
explicit VKSamplerCache(const VKDevice& device);
~VKSamplerCache();
protected:
- UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
+ vk::Sampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
- vk::Sampler ToSamplerType(const UniqueSampler& sampler) const override;
+ VkSampler ToSamplerType(const vk::Sampler& sampler) const override;
private:
const VKDevice& device;
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index b61d4fe63..900f551b3 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -10,23 +10,22 @@
#include "common/assert.h"
#include "common/microprofile.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_state_tracker.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
MICROPROFILE_DECLARE(Vulkan_WaitForWorker);
-void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf,
- const vk::DispatchLoaderDynamic& dld) {
+void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
auto command = first;
while (command != nullptr) {
auto next = command->GetNext();
- command->Execute(cmdbuf, dld);
+ command->Execute(cmdbuf);
command->~Command();
command = next;
}
@@ -51,7 +50,7 @@ VKScheduler::~VKScheduler() {
worker_thread.join();
}
-void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
SubmitExecution(semaphore);
if (release_fence) {
current_fence->Release();
@@ -59,7 +58,7 @@ void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
AllocateNewContext();
}
-void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) {
SubmitExecution(semaphore);
current_fence->Wait();
if (release_fence) {
@@ -89,17 +88,34 @@ void VKScheduler::DispatchWork() {
AcquireNewChunk();
}
-void VKScheduler::RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi) {
- if (state.renderpass && renderpass_bi == *state.renderpass) {
+void VKScheduler::RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
+ VkExtent2D render_area) {
+ if (renderpass == state.renderpass && framebuffer == state.framebuffer &&
+ render_area.width == state.render_area.width &&
+ render_area.height == state.render_area.height) {
return;
}
- const bool end_renderpass = state.renderpass.has_value();
- state.renderpass = renderpass_bi;
- Record([renderpass_bi, end_renderpass](auto cmdbuf, auto& dld) {
+ const bool end_renderpass = state.renderpass != nullptr;
+ state.renderpass = renderpass;
+ state.framebuffer = framebuffer;
+ state.render_area = render_area;
+
+ VkRenderPassBeginInfo renderpass_bi;
+ renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ renderpass_bi.pNext = nullptr;
+ renderpass_bi.renderPass = renderpass;
+ renderpass_bi.framebuffer = framebuffer;
+ renderpass_bi.renderArea.offset.x = 0;
+ renderpass_bi.renderArea.offset.y = 0;
+ renderpass_bi.renderArea.extent = render_area;
+ renderpass_bi.clearValueCount = 0;
+ renderpass_bi.pClearValues = nullptr;
+
+ Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) {
if (end_renderpass) {
- cmdbuf.endRenderPass(dld);
+ cmdbuf.EndRenderPass();
}
- cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld);
+ cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
});
}
@@ -107,13 +123,13 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
EndRenderPass();
}
-void VKScheduler::BindGraphicsPipeline(vk::Pipeline pipeline) {
+void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
if (state.graphics_pipeline == pipeline) {
return;
}
state.graphics_pipeline = pipeline;
- Record([pipeline](auto cmdbuf, auto& dld) {
- cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld);
+ Record([pipeline](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
});
}
@@ -126,37 +142,50 @@ void VKScheduler::WorkerThread() {
}
auto extracted_chunk = std::move(chunk_queue.Front());
chunk_queue.Pop();
- extracted_chunk->ExecuteAll(current_cmdbuf, device.GetDispatchLoader());
+ extracted_chunk->ExecuteAll(current_cmdbuf);
chunk_reserve.Push(std::move(extracted_chunk));
} while (!quit);
}
-void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
EndPendingOperations();
InvalidateState();
WaitWorker();
std::unique_lock lock{mutex};
- const auto queue = device.GetGraphicsQueue();
- const auto& dld = device.GetDispatchLoader();
- current_cmdbuf.end(dld);
+ current_cmdbuf.End();
- const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1U : 0U,
- &semaphore);
- queue.submit({submit_info}, static_cast<vk::Fence>(*current_fence), dld);
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = nullptr;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = nullptr;
+ submit_info.pWaitDstStageMask = nullptr;
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = current_cmdbuf.address();
+ submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
+ submit_info.pSignalSemaphores = &semaphore;
+ device.GetGraphicsQueue().Submit(submit_info, *current_fence);
}
void VKScheduler::AllocateNewContext() {
++ticks;
+ VkCommandBufferBeginInfo cmdbuf_bi;
+ cmdbuf_bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdbuf_bi.pNext = nullptr;
+ cmdbuf_bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdbuf_bi.pInheritanceInfo = nullptr;
+
std::unique_lock lock{mutex};
current_fence = next_fence;
next_fence = &resource_manager.CommitFence();
- current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
- current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit},
- device.GetDispatchLoader());
+ current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
+ device.GetDispatchLoader());
+ current_cmdbuf.Begin(cmdbuf_bi);
+
// Enable counters once again. These are disabled when a command buffer is finished.
if (query_cache) {
query_cache->UpdateCounters();
@@ -177,8 +206,8 @@ void VKScheduler::EndRenderPass() {
if (!state.renderpass) {
return;
}
- state.renderpass = std::nullopt;
- Record([](auto cmdbuf, auto& dld) { cmdbuf.endRenderPass(dld); });
+ state.renderpass = nullptr;
+ Record([](vk::CommandBuffer cmdbuf) { cmdbuf.EndRenderPass(); });
}
void VKScheduler::AcquireNewChunk() {
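The Record calls in this file queue type-erased callables that the worker thread later replays onto the real command buffer. A simplified sketch of that pattern using std::function; the actual CommandChunk avoids per-command heap allocations, so this is illustrative only:

#include <functional>
#include <vector>
#include <vulkan/vulkan.h>

// Captures commands now, executes them later against a command buffer.
class DeferredRecorder {
public:
    template <typename F>
    void Record(F&& func) {
        commands.emplace_back(std::forward<F>(func));
    }

    void ExecuteAll(VkCommandBuffer cmdbuf) {
        for (auto& command : commands) {
            command(cmdbuf);
        }
        commands.clear();
    }

private:
    std::vector<std::function<void(VkCommandBuffer)>> commands;
};

// Usage: recorder.Record([](VkCommandBuffer cmdbuf) { vkCmdEndRenderPass(cmdbuf); });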
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index c7cc291c3..82a8adc69 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -13,7 +13,7 @@
#include <utility>
#include "common/common_types.h"
#include "common/threadsafe_queue.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -49,10 +49,10 @@ public:
~VKScheduler();
/// Sends the current execution context to the GPU.
- void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+ void Flush(bool release_fence = true, VkSemaphore semaphore = nullptr);
/// Sends the current execution context to the GPU and waits for it to complete.
- void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+ void Finish(bool release_fence = true, VkSemaphore semaphore = nullptr);
/// Waits for the worker thread to finish executing everything. After this function returns it's
/// safe to touch worker resources.
@@ -62,14 +62,15 @@ public:
void DispatchWork();
/// Requests to begin a renderpass.
- void RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi);
+ void RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
+ VkExtent2D render_area);
/// Requests the current execution context to be able to execute operations only allowed outside
/// of a renderpass.
void RequestOutsideRenderPassOperationContext();
/// Binds a pipeline to the current execution context.
- void BindGraphicsPipeline(vk::Pipeline pipeline);
+ void BindGraphicsPipeline(VkPipeline pipeline);
/// Assigns the query cache.
void SetQueryCache(VKQueryCache& query_cache_) {
@@ -101,8 +102,7 @@ private:
public:
virtual ~Command() = default;
- virtual void Execute(vk::CommandBuffer cmdbuf,
- const vk::DispatchLoaderDynamic& dld) const = 0;
+ virtual void Execute(vk::CommandBuffer cmdbuf) const = 0;
Command* GetNext() const {
return next;
@@ -125,9 +125,8 @@ private:
TypedCommand(TypedCommand&&) = delete;
TypedCommand& operator=(TypedCommand&&) = delete;
- void Execute(vk::CommandBuffer cmdbuf,
- const vk::DispatchLoaderDynamic& dld) const override {
- command(cmdbuf, dld);
+ void Execute(vk::CommandBuffer cmdbuf) const override {
+ command(cmdbuf);
}
private:
@@ -136,7 +135,7 @@ private:
class CommandChunk final {
public:
- void ExecuteAll(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld);
+ void ExecuteAll(vk::CommandBuffer cmdbuf);
template <typename T>
bool Record(T& command) {
@@ -175,7 +174,7 @@ private:
void WorkerThread();
- void SubmitExecution(vk::Semaphore semaphore);
+ void SubmitExecution(VkSemaphore semaphore);
void AllocateNewContext();
@@ -198,8 +197,10 @@ private:
VKFence* next_fence = nullptr;
struct State {
- std::optional<vk::RenderPassBeginInfo> renderpass;
- vk::Pipeline graphics_pipeline;
+ VkRenderPass renderpass = nullptr;
+ VkFramebuffer framebuffer = nullptr;
+ VkExtent2D render_area = {0, 0};
+ VkPipeline graphics_pipeline = nullptr;
} state;
std::unique_ptr<CommandChunk> chunk;
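SubmitExecution's conditional signal semaphore, reduced to the raw C API. SubmitOnce is an illustrative name and error handling is omitted:

#include <vulkan/vulkan.h>

// Submits one command buffer, signaling 'semaphore' only when the caller
// passed one; pSignalSemaphores is ignored when the count is zero.
void SubmitOnce(VkQueue queue, VkCommandBuffer cmdbuf, VkFence fence, VkSemaphore semaphore) {
    VkSubmitInfo si{};
    si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    si.commandBufferCount = 1;
    si.pCommandBuffers = &cmdbuf;
    si.signalSemaphoreCount = semaphore != VK_NULL_HANDLE ? 1 : 0;
    si.pSignalSemaphores = &semaphore;
    vkQueueSubmit(queue, 1, &si, fence); // error handling omitted
}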
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 24d3ca08f..aaa138f52 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -35,7 +35,7 @@ namespace {
using Sirit::Id;
using Tegra::Engines::ShaderType;
using Tegra::Shader::Attribute;
-using Tegra::Shader::AttributeUse;
+using Tegra::Shader::PixelImap;
using Tegra::Shader::Register;
using namespace VideoCommon::Shader;
@@ -752,16 +752,16 @@ private:
if (stage != ShaderType::Fragment) {
continue;
}
- switch (header.ps.GetAttributeUse(location)) {
- case AttributeUse::Constant:
+ switch (header.ps.GetPixelImap(location)) {
+ case PixelImap::Constant:
Decorate(id, spv::Decoration::Flat);
break;
- case AttributeUse::ScreenLinear:
- Decorate(id, spv::Decoration::NoPerspective);
- break;
- case AttributeUse::Perspective:
+ case PixelImap::Perspective:
// Default
break;
+ case PixelImap::ScreenLinear:
+ Decorate(id, spv::Decoration::NoPerspective);
+ break;
default:
UNREACHABLE_MSG("Unused attribute being fetched");
}
@@ -801,7 +801,7 @@ private:
if (IsOutputAttributeArray()) {
const u32 num = GetNumOutputVertices();
type = TypeArray(type, Constant(t_uint, num));
- if (device.GetDriverID() != vk::DriverIdKHR::eIntelProprietaryWindows) {
+ if (device.GetDriverID() != VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
// Intel's proprietary driver fails to set up defaults for arrayed output
// attributes.
varying_default = ConstantComposite(type, std::vector(num, varying_default));
@@ -1145,9 +1145,6 @@ private:
switch (attribute) {
case Attribute::Index::Position: {
if (stage == ShaderType::Fragment) {
- if (element == 3) {
- return {Constant(t_float, 1.0f), Type::Float};
- }
return {OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)),
Type::Float};
}
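The interpolation switch above maps the renamed PixelImap enum onto SPIR-V decorations. A self-contained restatement; the local enum and the strings stand in for Tegra::Shader::PixelImap and spv::Decoration:

#include <optional>

enum class PixelImap { Unused, Constant, Perspective, ScreenLinear };

// Returns the decoration to apply to a fragment input, if any.
std::optional<const char*> InterpolationDecoration(PixelImap imap) {
    switch (imap) {
    case PixelImap::Constant:
        return "Flat";           // no interpolation at all
    case PixelImap::ScreenLinear:
        return "NoPerspective";  // linear in screen space
    case PixelImap::Perspective:
        return std::nullopt;     // default perspective-correct interpolation
    default:
        return std::nullopt;     // unused attribute
    }
}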
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index b97c4cb3d..784839327 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -8,27 +8,25 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
-UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) {
+vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) {
// Avoid undefined behavior by copying to a staging allocation
ASSERT(code_size % sizeof(u32) == 0);
const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
std::memcpy(data.get(), code_data, code_size);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- const vk::ShaderModuleCreateInfo shader_ci({}, code_size, data.get());
- vk::ShaderModule shader_module;
- if (dev.createShaderModule(&shader_ci, nullptr, &shader_module, dld) != vk::Result::eSuccess) {
- UNREACHABLE_MSG("Shader module failed to build!");
- }
-
- return UniqueShaderModule(shader_module, vk::ObjectDestroy(dev, nullptr, dld));
+ VkShaderModuleCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.codeSize = code_size;
+ ci.pCode = data.get();
+ return device.GetLogical().CreateShaderModule(ci);
}
} // namespace Vulkan
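BuildShader copies the byte stream into a u32 staging buffer because Vulkan requires SPIR-V as aligned 32-bit words. A raw-API sketch under that assumption; MakeShaderModule is an illustrative name:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vulkan/vulkan.h>

// Copies the (possibly unaligned) byte stream into aligned words, then
// creates the shader module from them.
VkShaderModule MakeShaderModule(VkDevice device, const uint8_t* code, std::size_t code_size) {
    auto words = std::make_unique<uint32_t[]>(code_size / sizeof(uint32_t));
    std::memcpy(words.get(), code, code_size);

    VkShaderModuleCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    ci.codeSize = code_size; // size in bytes, not words
    ci.pCode = words.get();

    VkShaderModule module = VK_NULL_HANDLE;
    vkCreateShaderModule(device, &ci, nullptr, &module); // error handling omitted
    return module;
}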
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index c06d65970..be38d6697 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -6,12 +6,12 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
class VKDevice;
-UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data);
+vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data);
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 374959f82..94d954d7a 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -13,6 +13,7 @@
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -71,17 +72,23 @@ VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_
}
VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
- const auto usage =
- vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
- vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer |
- vk::BufferUsageFlagBits::eIndexBuffer;
const u32 log2 = Common::Log2Ceil64(size);
- const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0,
- nullptr);
- const auto dev = device.GetLogical();
+
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = 1ULL << log2;
+ ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+
auto buffer = std::make_unique<VKBuffer>();
- buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader());
- buffer->commit = memory_manager.Commit(*buffer->handle, host_visible);
+ buffer->handle = device.GetLogical().CreateBuffer(ci);
+ buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
auto& entries = GetCache(host_visible)[log2].entries;
return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
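CreateStagingBuffer rounds each request up to the next power of two via Common::Log2Ceil64 so that buffers can be reused across similar sizes. A sketch of that bucketing; Log2Ceil here is a stand-in, not the project's helper:

#include <cstdint>

// Smallest log2 such that (1 << log2) >= value.
uint32_t Log2Ceil(uint64_t value) {
    uint32_t log2 = 0;
    while ((uint64_t{1} << log2) < value) {
        ++log2;
    }
    return log2;
}

// e.g. a 1000-byte request lands in the 1024-byte bucket (log2 == 10), and
// any later request of up to 1024 bytes can reuse that same buffer.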
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 4d9488f49..a0840ff8c 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -11,9 +11,9 @@
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -22,7 +22,7 @@ class VKFenceWatch;
class VKScheduler;
struct VKBuffer final {
- UniqueBuffer handle;
+ vk::Buffer handle;
VKMemoryCommit commit;
};
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index d48d3b44c..38a93a01a 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -9,11 +9,11 @@
#include "common/alignment.h"
#include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -25,8 +25,8 @@ constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
- vk::MemoryPropertyFlags wanted) {
- const auto properties = device.GetPhysical().getMemoryProperties(device.GetDispatchLoader());
+ VkMemoryPropertyFlags wanted) {
+ const auto properties = device.GetPhysical().GetMemoryProperties();
for (u32 i = 0; i < properties.memoryTypeCount; i++) {
if (!(filter & (1 << i))) {
continue;
@@ -35,13 +35,13 @@ std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
return i;
}
}
- return {};
+ return std::nullopt;
}
} // Anonymous namespace
VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
- vk::BufferUsageFlags usage)
+ VkBufferUsageFlags usage)
: device{device}, scheduler{scheduler} {
CreateBuffers(usage);
ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
@@ -78,17 +78,13 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
invalidated = true;
}
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- const auto pointer = reinterpret_cast<u8*>(dev.mapMemory(*memory, offset, size, {}, dld));
- return {pointer, offset, invalidated};
+ return {memory.Map(offset, size), offset, invalidated};
}
void VKStreamBuffer::Unmap(u64 size) {
ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
- const auto dev = device.GetLogical();
- dev.unmapMemory(*memory, device.GetDispatchLoader());
+ memory.Unmap();
offset += size;
@@ -101,30 +97,42 @@ void VKStreamBuffer::Unmap(u64 size) {
watch.fence.Watch(scheduler.GetFence());
}
-void VKStreamBuffer::CreateBuffers(vk::BufferUsageFlags usage) {
- const vk::BufferCreateInfo buffer_ci({}, STREAM_BUFFER_SIZE, usage, vk::SharingMode::eExclusive,
- 0, nullptr);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);
+void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
+ VkBufferCreateInfo buffer_ci;
+ buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ buffer_ci.pNext = nullptr;
+ buffer_ci.flags = 0;
+ buffer_ci.size = STREAM_BUFFER_SIZE;
+ buffer_ci.usage = usage;
+ buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ buffer_ci.queueFamilyIndexCount = 0;
+ buffer_ci.pQueueFamilyIndices = nullptr;
+
+ const auto& dev = device.GetLogical();
+ buffer = dev.CreateBuffer(buffer_ci);
- const auto requirements = dev.getBufferMemoryRequirements(*buffer, dld);
+ const auto& dld = device.GetDispatchLoader();
+ const auto requirements = dev.GetBufferMemoryRequirements(*buffer);
// Prefer device local host visible allocations (this should hit AMD's pinned memory).
- auto type = FindMemoryType(device, requirements.memoryTypeBits,
- vk::MemoryPropertyFlagBits::eHostVisible |
- vk::MemoryPropertyFlagBits::eHostCoherent |
- vk::MemoryPropertyFlagBits::eDeviceLocal);
+ auto type =
+ FindMemoryType(device, requirements.memoryTypeBits,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
if (!type) {
// Otherwise search for a host visible allocation.
type = FindMemoryType(device, requirements.memoryTypeBits,
- vk::MemoryPropertyFlagBits::eHostVisible |
- vk::MemoryPropertyFlagBits::eHostCoherent);
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
ASSERT_MSG(type, "No host visible and coherent memory type found");
}
- const vk::MemoryAllocateInfo alloc_ci(requirements.size, *type);
- memory = dev.allocateMemoryUnique(alloc_ci, nullptr, dld);
-
- dev.bindBufferMemory(*buffer, *memory, 0, dld);
+ VkMemoryAllocateInfo memory_ai;
+ memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ memory_ai.pNext = nullptr;
+ memory_ai.allocationSize = requirements.size;
+ memory_ai.memoryTypeIndex = *type;
+
+ memory = dev.AllocateMemory(memory_ai);
+ buffer.BindMemory(*memory, 0);
}
void VKStreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) {
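FindMemoryType above filters the memoryTypeBits mask from VkMemoryRequirements against the wanted property flags. The same logic against the raw C API, as a standalone sketch:

#include <cstdint>
#include <optional>
#include <vulkan/vulkan.h>

// Returns the first memory type allowed by 'filter' that has all 'wanted' flags.
std::optional<uint32_t> FindMemoryType(VkPhysicalDevice physical_device, uint32_t filter,
                                       VkMemoryPropertyFlags wanted) {
    VkPhysicalDeviceMemoryProperties properties;
    vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
    for (uint32_t i = 0; i < properties.memoryTypeCount; ++i) {
        if (!(filter & (1u << i))) {
            continue; // type i is not usable for this resource
        }
        if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) {
            return i;
        }
    }
    return std::nullopt;
}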
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 187c0c612..58ce8b973 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -9,7 +9,7 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -21,7 +21,7 @@ class VKScheduler;
class VKStreamBuffer final {
public:
explicit VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
- vk::BufferUsageFlags usage);
+ VkBufferUsageFlags usage);
~VKStreamBuffer();
/**
@@ -35,7 +35,7 @@ public:
/// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
void Unmap(u64 size);
- vk::Buffer GetHandle() const {
+ VkBuffer GetHandle() const {
return *buffer;
}
@@ -46,20 +46,18 @@ private:
};
/// Creates Vulkan buffer handles, committing the required memory.
- void CreateBuffers(vk::BufferUsageFlags usage);
+ void CreateBuffers(VkBufferUsageFlags usage);
/// Increases the number of available watches.
void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size);
void WaitPendingOperations(u64 requested_upper_bound);
- const VKDevice& device; ///< Vulkan device manager.
- VKScheduler& scheduler; ///< Command scheduler.
- const vk::AccessFlags access; ///< Access usage of this stream buffer.
- const vk::PipelineStageFlags pipeline_stage; ///< Pipeline usage of this stream buffer.
+ const VKDevice& device; ///< Vulkan device manager.
+ VKScheduler& scheduler; ///< Command scheduler.
- UniqueBuffer buffer; ///< Mapped buffer.
- UniqueDeviceMemory memory; ///< Memory allocation.
+ vk::Buffer buffer; ///< Mapped buffer.
+ vk::DeviceMemory memory; ///< Memory allocation.
u64 offset{}; ///< Buffer iterator.
u64 mapped_size{}; ///< Size reserved for the current copy.
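The memory.Map()/Unmap() pair shown in the .cpp hunk wraps vkMapMemory/vkUnmapMemory. A hedged sketch assuming host-visible, host-coherent memory, so no explicit flush is needed; the function names are illustrative:

#include <cstdint>
#include <vulkan/vulkan.h>

// Maps a sub-range of a host-visible allocation and returns a byte pointer.
uint8_t* MapRange(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset,
                  VkDeviceSize size) {
    void* pointer = nullptr;
    vkMapMemory(device, memory, offset, size, 0, &pointer); // error handling omitted
    return static_cast<uint8_t*>(pointer);
}

void UnmapAll(VkDevice device, VkDeviceMemory memory) {
    vkUnmapMemory(device, memory);
}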
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index 9e73fa9cd..bffd8f32a 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -11,69 +11,64 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/frontend/framebuffer_layout.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
namespace {
-vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats,
- bool srgb) {
- if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) {
- vk::SurfaceFormatKHR format;
- format.format = vk::Format::eB8G8R8A8Unorm;
- format.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
+VkSurfaceFormatKHR ChooseSwapSurfaceFormat(vk::Span<VkSurfaceFormatKHR> formats, bool srgb) {
+ if (formats.size() == 1 && formats[0].format == VK_FORMAT_UNDEFINED) {
+ VkSurfaceFormatKHR format;
+ format.format = VK_FORMAT_B8G8R8A8_UNORM;
+ format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return format;
}
const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) {
- const auto request_format = srgb ? vk::Format::eB8G8R8A8Srgb : vk::Format::eB8G8R8A8Unorm;
+ const auto request_format = srgb ? VK_FORMAT_B8G8R8A8_SRGB : VK_FORMAT_B8G8R8A8_UNORM;
return format.format == request_format &&
- format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear;
+ format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
});
return found != formats.end() ? *found : formats[0];
}
-vk::PresentModeKHR ChooseSwapPresentMode(const std::vector<vk::PresentModeKHR>& modes) {
+VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
// Mailbox doesn't block the application like FIFO (vsync) does, so prefer it
- const auto& found = std::find_if(modes.begin(), modes.end(), [](const auto& mode) {
- return mode == vk::PresentModeKHR::eMailbox;
- });
- return found != modes.end() ? *found : vk::PresentModeKHR::eFifo;
+ const auto found = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
+ return found != modes.end() ? *found : VK_PRESENT_MODE_FIFO_KHR;
}
-vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
- u32 height) {
+VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height) {
constexpr auto undefined_size{std::numeric_limits<u32>::max()};
if (capabilities.currentExtent.width != undefined_size) {
return capabilities.currentExtent;
}
- vk::Extent2D extent = {width, height};
+ VkExtent2D extent;
extent.width = std::max(capabilities.minImageExtent.width,
- std::min(capabilities.maxImageExtent.width, extent.width));
+ std::min(capabilities.maxImageExtent.width, width));
extent.height = std::max(capabilities.minImageExtent.height,
- std::min(capabilities.maxImageExtent.height, extent.height));
+ std::min(capabilities.maxImageExtent.height, height));
return extent;
}
} // Anonymous namespace
-VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device)
+VKSwapchain::VKSwapchain(VkSurfaceKHR surface, const VKDevice& device)
: surface{surface}, device{device} {}
VKSwapchain::~VKSwapchain() = default;
void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
- const auto& dld = device.GetDispatchLoader();
const auto physical_device = device.GetPhysical();
- const auto capabilities{physical_device.getSurfaceCapabilitiesKHR(surface, dld)};
+ const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
return;
}
- device.GetLogical().waitIdle(dld);
+ device.GetLogical().WaitIdle();
Destroy();
CreateSwapchain(capabilities, width, height, srgb);
@@ -84,10 +79,8 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
}
void VKSwapchain::AcquireNextImage() {
- const auto dev{device.GetLogical()};
- const auto& dld{device.GetDispatchLoader()};
- dev.acquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
- *present_semaphores[frame_index], {}, &image_index, dld);
+ device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
+ *present_semaphores[frame_index], {}, &image_index);
if (auto& fence = fences[image_index]; fence) {
fence->Wait();
@@ -96,29 +89,37 @@ void VKSwapchain::AcquireNextImage() {
}
}
-bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) {
- const vk::Semaphore present_semaphore{*present_semaphores[frame_index]};
- const std::array<vk::Semaphore, 2> semaphores{present_semaphore, render_semaphore};
- const u32 wait_semaphore_count{render_semaphore ? 2U : 1U};
- const auto& dld{device.GetDispatchLoader()};
+bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
+ const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
+ const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
const auto present_queue{device.GetPresentQueue()};
bool recreated = false;
- const vk::PresentInfoKHR present_info(wait_semaphore_count, semaphores.data(), 1,
- &swapchain.get(), &image_index, {});
- switch (const auto result = present_queue.presentKHR(&present_info, dld); result) {
- case vk::Result::eSuccess:
+ VkPresentInfoKHR present_info;
+ present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ present_info.pNext = nullptr;
+ present_info.waitSemaphoreCount = render_semaphore ? 2U : 1U;
+ present_info.pWaitSemaphores = semaphores.data();
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = swapchain.address();
+ present_info.pImageIndices = &image_index;
+ present_info.pResults = nullptr;
+
+ switch (const VkResult result = present_queue.Present(present_info)) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
break;
- case vk::Result::eErrorOutOfDateKHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
if (current_width > 0 && current_height > 0) {
Create(current_width, current_height, current_srgb);
recreated = true;
}
break;
default:
- LOG_CRITICAL(Render_Vulkan, "Vulkan failed to present swapchain due to {}!",
- vk::to_string(result));
- UNREACHABLE();
+ LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
+ break;
}
ASSERT(fences[image_index] == nullptr);
@@ -132,74 +133,92 @@ bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebu
return framebuffer.width != current_width || framebuffer.height != current_height;
}
-void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
+void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
u32 height, bool srgb) {
- const auto& dld{device.GetDispatchLoader()};
const auto physical_device{device.GetPhysical()};
- const auto formats{physical_device.getSurfaceFormatsKHR(surface, dld)};
- const auto present_modes{physical_device.getSurfacePresentModesKHR(surface, dld)};
+ const auto formats{physical_device.GetSurfaceFormatsKHR(surface)};
+ const auto present_modes{physical_device.GetSurfacePresentModesKHR(surface)};
- const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)};
- const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};
+ const VkSurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)};
+ const VkPresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};
u32 requested_image_count{capabilities.minImageCount + 1};
if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
requested_image_count = capabilities.maxImageCount;
}
- vk::SwapchainCreateInfoKHR swapchain_ci(
- {}, surface, requested_image_count, surface_format.format, surface_format.colorSpace, {}, 1,
- vk::ImageUsageFlagBits::eColorAttachment, {}, {}, {}, capabilities.currentTransform,
- vk::CompositeAlphaFlagBitsKHR::eOpaque, present_mode, false, {});
+ VkSwapchainCreateInfoKHR swapchain_ci;
+ swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ swapchain_ci.pNext = nullptr;
+ swapchain_ci.flags = 0;
+ swapchain_ci.surface = surface;
+ swapchain_ci.minImageCount = requested_image_count;
+ swapchain_ci.imageFormat = surface_format.format;
+ swapchain_ci.imageColorSpace = surface_format.colorSpace;
+ swapchain_ci.imageArrayLayers = 1;
+ swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapchain_ci.queueFamilyIndexCount = 0;
+ swapchain_ci.pQueueFamilyIndices = nullptr;
+ swapchain_ci.preTransform = capabilities.currentTransform;
+ swapchain_ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ swapchain_ci.presentMode = present_mode;
+ swapchain_ci.clipped = VK_FALSE;
+ swapchain_ci.oldSwapchain = nullptr;
const u32 graphics_family{device.GetGraphicsFamily()};
const u32 present_family{device.GetPresentFamily()};
const std::array<u32, 2> queue_indices{graphics_family, present_family};
if (graphics_family != present_family) {
- swapchain_ci.imageSharingMode = vk::SharingMode::eConcurrent;
+ swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
} else {
- swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive;
+ swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
}
// Request the size again to reduce the possibility of a TOCTOU race condition.
- const auto updated_capabilities = physical_device.getSurfaceCapabilitiesKHR(surface, dld);
+ const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
// Don't add code between this query and the swapchain creation.
- const auto dev{device.GetLogical()};
- swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld);
+ swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
extent = swapchain_ci.imageExtent;
current_width = extent.width;
current_height = extent.height;
current_srgb = srgb;
- images = dev.getSwapchainImagesKHR(*swapchain, dld);
+ images = swapchain.GetImages();
image_count = static_cast<u32>(images.size());
image_format = surface_format.format;
}
void VKSwapchain::CreateSemaphores() {
- const auto dev{device.GetLogical()};
- const auto& dld{device.GetDispatchLoader()};
-
present_semaphores.resize(image_count);
- for (std::size_t i = 0; i < image_count; i++) {
- present_semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld);
- }
+ std::generate(present_semaphores.begin(), present_semaphores.end(),
+ [this] { return device.GetLogical().CreateSemaphore(); });
}
void VKSwapchain::CreateImageViews() {
- const auto dev{device.GetLogical()};
- const auto& dld{device.GetDispatchLoader()};
+ VkImageViewCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ // ci.image is set in the loop below, once per swapchain image
+ ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ ci.format = image_format;
+ ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
+ ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ ci.subresourceRange.baseMipLevel = 0;
+ ci.subresourceRange.levelCount = 1;
+ ci.subresourceRange.baseArrayLayer = 0;
+ ci.subresourceRange.layerCount = 1;
image_views.resize(image_count);
for (std::size_t i = 0; i < image_count; i++) {
- const vk::ImageViewCreateInfo image_view_ci({}, images[i], vk::ImageViewType::e2D,
- image_format, {},
- {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1});
- image_views[i] = dev.createImageViewUnique(image_view_ci, nullptr, dld);
+ ci.image = images[i];
+ image_views[i] = device.GetLogical().CreateImageView(ci);
}
}
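ChooseSwapExtent, restated: when the surface reports a fixed currentExtent the swapchain must use it; otherwise the requested size is clamped into the supported range. A standalone sketch (ClampSwapExtent is an illustrative name):

#include <algorithm>
#include <cstdint>
#include <limits>
#include <vulkan/vulkan.h>

VkExtent2D ClampSwapExtent(const VkSurfaceCapabilitiesKHR& caps, uint32_t width,
                           uint32_t height) {
    if (caps.currentExtent.width != std::numeric_limits<uint32_t>::max()) {
        return caps.currentExtent; // the surface dictates the size
    }
    VkExtent2D extent;
    extent.width = std::clamp(width, caps.minImageExtent.width, caps.maxImageExtent.width);
    extent.height = std::clamp(height, caps.minImageExtent.height, caps.maxImageExtent.height);
    return extent;
}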
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
index 2f3b2ccd5..a35d61345 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.h
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -7,7 +7,7 @@
#include <vector>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Layout {
struct FramebufferLayout;
@@ -20,7 +20,7 @@ class VKFence;
class VKSwapchain {
public:
- explicit VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device);
+ explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device);
~VKSwapchain();
/// Creates (or recreates) the swapchain with a given size.
@@ -31,12 +31,12 @@ public:
/// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
recreated. Takes responsibility for the ownership of the fence.
- bool Present(vk::Semaphore render_semaphore, VKFence& fence);
+ bool Present(VkSemaphore render_semaphore, VKFence& fence);
/// Returns true when the framebuffer layout has changed.
bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
- const vk::Extent2D& GetSize() const {
+ VkExtent2D GetSize() const {
return extent;
}
@@ -48,15 +48,15 @@ public:
return image_index;
}
- vk::Image GetImageIndex(std::size_t index) const {
+ VkImage GetImageIndex(std::size_t index) const {
return images[index];
}
- vk::ImageView GetImageViewIndex(std::size_t index) const {
+ VkImageView GetImageViewIndex(std::size_t index) const {
return *image_views[index];
}
- vk::Format GetImageFormat() const {
+ VkFormat GetImageFormat() const {
return image_format;
}
@@ -65,30 +65,30 @@ public:
}
private:
- void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
+ void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
bool srgb);
void CreateSemaphores();
void CreateImageViews();
void Destroy();
- const vk::SurfaceKHR surface;
+ const VkSurfaceKHR surface;
const VKDevice& device;
- UniqueSwapchainKHR swapchain;
+ vk::SwapchainKHR swapchain;
std::size_t image_count{};
- std::vector<vk::Image> images;
- std::vector<UniqueImageView> image_views;
- std::vector<UniqueFramebuffer> framebuffers;
+ std::vector<VkImage> images;
+ std::vector<vk::ImageView> image_views;
+ std::vector<vk::Framebuffer> framebuffers;
std::vector<VKFence*> fences;
- std::vector<UniqueSemaphore> present_semaphores;
+ std::vector<vk::Semaphore> present_semaphores;
u32 image_index{};
u32 frame_index{};
- vk::Format image_format{};
- vk::Extent2D extent{};
+ VkFormat image_format{};
+ VkExtent2D extent{};
u32 current_width{};
u32 current_height{};
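And the present-mode preference from vk_swapchain.cpp as a standalone sketch: mailbox when available (no blocking, effectively triple buffering), otherwise FIFO, which the spec guarantees every driver supports:

#include <algorithm>
#include <vector>
#include <vulkan/vulkan.h>

VkPresentModeKHR PickPresentMode(const std::vector<VkPresentModeKHR>& modes) {
    const auto found = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
    return found != modes.end() ? *found : VK_PRESENT_MODE_FIFO_KHR;
}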
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 26175921b..de4c23120 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -17,7 +17,6 @@
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/morton.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
@@ -25,6 +24,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/surface.h"
#include "video_core/textures/convert.h"
@@ -35,23 +35,22 @@ using VideoCore::MortonSwizzleMode;
using Tegra::Texture::SwizzleSource;
using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
using VideoCore::Surface::SurfaceTarget;
namespace {
-vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
+VkImageType SurfaceTargetToImage(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray:
- return vk::ImageType::e1D;
+ return VK_IMAGE_TYPE_1D;
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray:
case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray:
- return vk::ImageType::e2D;
+ return VK_IMAGE_TYPE_2D;
case SurfaceTarget::Texture3D:
- return vk::ImageType::e3D;
+ return VK_IMAGE_TYPE_3D;
case SurfaceTarget::TextureBuffer:
UNREACHABLE();
return {};
@@ -60,35 +59,35 @@ vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
return {};
}
-vk::ImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
+VkImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
if (pixel_format < PixelFormat::MaxColorFormat) {
- return vk::ImageAspectFlagBits::eColor;
+ return VK_IMAGE_ASPECT_COLOR_BIT;
} else if (pixel_format < PixelFormat::MaxDepthFormat) {
- return vk::ImageAspectFlagBits::eDepth;
+ return VK_IMAGE_ASPECT_DEPTH_BIT;
} else if (pixel_format < PixelFormat::MaxDepthStencilFormat) {
- return vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil;
+ return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
} else {
- UNREACHABLE_MSG("Invalid pixel format={}", static_cast<u32>(pixel_format));
- return vk::ImageAspectFlagBits::eColor;
+ UNREACHABLE_MSG("Invalid pixel format={}", static_cast<int>(pixel_format));
+ return VK_IMAGE_ASPECT_COLOR_BIT;
}
}
-vk::ImageViewType GetImageViewType(SurfaceTarget target) {
+VkImageViewType GetImageViewType(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
- return vk::ImageViewType::e1D;
+ return VK_IMAGE_VIEW_TYPE_1D;
case SurfaceTarget::Texture2D:
- return vk::ImageViewType::e2D;
+ return VK_IMAGE_VIEW_TYPE_2D;
case SurfaceTarget::Texture3D:
- return vk::ImageViewType::e3D;
+ return VK_IMAGE_VIEW_TYPE_3D;
case SurfaceTarget::Texture1DArray:
- return vk::ImageViewType::e1DArray;
+ return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
case SurfaceTarget::Texture2DArray:
- return vk::ImageViewType::e2DArray;
+ return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case SurfaceTarget::TextureCubemap:
- return vk::ImageViewType::eCube;
+ return VK_IMAGE_VIEW_TYPE_CUBE;
case SurfaceTarget::TextureCubeArray:
- return vk::ImageViewType::eCubeArray;
+ return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case SurfaceTarget::TextureBuffer:
break;
}
@@ -96,71 +95,88 @@ vk::ImageViewType GetImageViewType(SurfaceTarget target) {
return {};
}
-UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params) {
+vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
+ std::size_t host_memory_size) {
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
- const vk::BufferCreateInfo buffer_ci({}, params.GetHostSizeInBytes(),
- vk::BufferUsageFlagBits::eUniformTexelBuffer |
- vk::BufferUsageFlagBits::eTransferSrc |
- vk::BufferUsageFlagBits::eTransferDst,
- vk::SharingMode::eExclusive, 0, nullptr);
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- return dev.createBufferUnique(buffer_ci, nullptr, dld);
+ VkBufferCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.size = static_cast<VkDeviceSize>(host_memory_size);
+ ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ return device.GetLogical().CreateBuffer(ci);
}
-vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
- const SurfaceParams& params,
- vk::Buffer buffer) {
+VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
+ const SurfaceParams& params, VkBuffer buffer,
+ std::size_t host_memory_size) {
ASSERT(params.IsBuffer());
- const auto format =
- MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
- return vk::BufferViewCreateInfo({}, buffer, format, 0, params.GetHostSizeInBytes());
+ VkBufferViewCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.buffer = buffer;
+ ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
+ ci.offset = 0;
+ ci.range = static_cast<VkDeviceSize>(host_memory_size);
+ return ci;
}
-vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
- constexpr auto sample_count = vk::SampleCountFlagBits::e1;
- constexpr auto tiling = vk::ImageTiling::eOptimal;
-
+VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
ASSERT(!params.IsBuffer());
const auto [format, attachable, storage] =
MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
- auto image_usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst |
- vk::ImageUsageFlagBits::eTransferSrc;
+ VkImageCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.imageType = SurfaceTargetToImage(params.target);
+ ci.format = format;
+ ci.mipLevels = params.num_levels;
+ ci.arrayLayers = static_cast<u32>(params.GetNumLayers());
+ ci.samples = VK_SAMPLE_COUNT_1_BIT;
+ ci.tiling = VK_IMAGE_TILING_OPTIMAL;
+ ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ci.queueFamilyIndexCount = 0;
+ ci.pQueueFamilyIndices = nullptr;
+ ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
if (attachable) {
- image_usage |= params.IsPixelFormatZeta() ? vk::ImageUsageFlagBits::eDepthStencilAttachment
- : vk::ImageUsageFlagBits::eColorAttachment;
+ ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
+ : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if (storage) {
- image_usage |= vk::ImageUsageFlagBits::eStorage;
+ ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
}
- vk::ImageCreateFlags flags;
- vk::Extent3D extent;
switch (params.target) {
case SurfaceTarget::TextureCubemap:
case SurfaceTarget::TextureCubeArray:
- flags |= vk::ImageCreateFlagBits::eCubeCompatible;
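+        // Cube image views can only be created from images flagged as cube compatible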
+ ci.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
[[fallthrough]];
case SurfaceTarget::Texture1D:
case SurfaceTarget::Texture1DArray:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture2DArray:
- extent = vk::Extent3D(params.width, params.height, 1);
+ ci.extent = {params.width, params.height, 1};
break;
case SurfaceTarget::Texture3D:
- extent = vk::Extent3D(params.width, params.height, params.depth);
+ ci.extent = {params.width, params.height, params.depth};
break;
case SurfaceTarget::TextureBuffer:
UNREACHABLE();
}
- return vk::ImageCreateInfo(flags, SurfaceTargetToImage(params.target), format, extent,
- params.num_levels, static_cast<u32>(params.GetNumLayers()),
- sample_count, tiling, image_usage, vk::SharingMode::eExclusive, 0,
- nullptr, vk::ImageLayout::eUndefined);
+ return ci;
}
} // Anonymous namespace
@@ -169,19 +185,18 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
GPUVAddr gpu_addr, const SurfaceParams& params)
- : SurfaceBase<View>{gpu_addr, params}, system{system}, device{device},
- resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
- staging_pool{staging_pool} {
+ : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system},
+ device{device}, resource_manager{resource_manager},
+ memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
if (params.IsBuffer()) {
-        buffer = CreateBuffer(device, params);
-        commit = memory_manager.Commit(*buffer, false);
+        buffer = CreateBuffer(device, params, host_memory_size);
+        commit = memory_manager.Commit(buffer, false);

-        const auto buffer_view_ci = GenerateBufferViewCreateInfo(device, params, *buffer);
+        const auto buffer_view_ci =
+            GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
format = buffer_view_ci.format;
- const auto dev = device.GetLogical();
- const auto& dld = device.GetDispatchLoader();
- buffer_view = dev.createBufferViewUnique(buffer_view_ci, nullptr, dld);
+ buffer_view = device.GetLogical().CreateBufferView(buffer_view_ci);
} else {
const auto image_ci = GenerateImageCreateInfo(device, params);
format = image_ci.format;
@@ -219,16 +234,15 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
// We can't copy images to buffers inside a renderpass
scheduler.RequestOutsideRenderPassOperationContext();
- FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
- vk::ImageLayout::eTransferSrcOptimal);
+ FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
const auto& buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
// TODO(Rodrigo): Do this in a single copy
for (u32 level = 0; level < params.num_levels; ++level) {
- scheduler.Record([image = image->GetHandle(), buffer = *buffer.handle,
- copy = GetBufferImageCopy(level)](auto cmdbuf, auto& dld) {
- cmdbuf.copyImageToBuffer(image, vk::ImageLayout::eTransferSrcOptimal, buffer, {copy},
- dld);
+ scheduler.Record([image = *image->GetHandle(), buffer = *buffer.handle,
+ copy = GetBufferImageCopy(level)](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, copy);
});
}
scheduler.Finish();
@@ -255,15 +269,27 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
- size = params.GetHostSizeInBytes()](auto cmdbuf, auto& dld) {
- const vk::BufferCopy copy(0, 0, size);
- cmdbuf.copyBuffer(src_buffer, dst_buffer, {copy}, dld);
-
- cmdbuf.pipelineBarrier(
- vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eVertexShader, {}, {},
- {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite,
- vk::AccessFlagBits::eShaderRead, 0, 0, dst_buffer, 0, size)},
- {}, dld);
+ size = host_memory_size](vk::CommandBuffer cmdbuf) {
+ VkBufferCopy copy;
+ copy.srcOffset = 0;
+ copy.dstOffset = 0;
+ copy.size = size;
+ cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+
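+        // Make the transfer write visible to later vertex shader reads of the texel buffer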
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = dst_buffer;
+ barrier.offset = 0;
+ barrier.size = size;
+ cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+ 0, {}, barrier, {});
});
}
@@ -271,46 +297,49 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
const auto& src_buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
- FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferWrite,
- vk::ImageLayout::eTransferDstOptimal);
+ FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
for (u32 level = 0; level < params.num_levels; ++level) {
- vk::BufferImageCopy copy = GetBufferImageCopy(level);
- if (image->GetAspectMask() ==
- (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
- vk::BufferImageCopy depth = copy;
- vk::BufferImageCopy stencil = copy;
- depth.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eDepth;
- stencil.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eStencil;
- scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(), depth,
- stencil](auto cmdbuf, auto& dld) {
- cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
- {depth, stencil}, dld);
+ const VkBufferImageCopy copy = GetBufferImageCopy(level);
+ if (image->GetAspectMask() == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+ copy](vk::CommandBuffer cmdbuf) {
+ std::array<VkBufferImageCopy, 2> copies = {copy, copy};
+ copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+ copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ copies);
});
} else {
- scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(),
- copy](auto cmdbuf, auto& dld) {
- cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
- {copy}, dld);
+ scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
+ copy](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
});
}
}
}
-vk::BufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
- const u32 vk_depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
- const auto compression_type = params.GetCompressionType();
- const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
- ? params.GetConvertedMipmapOffset(level)
- : params.GetHostMipmapLevelOffset(level);
-
- return vk::BufferImageCopy(
- mip_offset, 0, 0,
- {image->GetAspectMask(), level, 0, static_cast<u32>(params.GetNumLayers())}, {0, 0, 0},
- {params.GetMipWidth(level), params.GetMipHeight(level), vk_depth});
+VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
+ VkBufferImageCopy copy;
+ copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted);
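+    // A row length and image height of zero mean the data is tightly packed to imageExtent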
+ copy.bufferRowLength = 0;
+ copy.bufferImageHeight = 0;
+ copy.imageSubresource.aspectMask = image->GetAspectMask();
+ copy.imageSubresource.mipLevel = level;
+ copy.imageSubresource.baseArrayLayer = 0;
+ copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers());
+ copy.imageOffset.x = 0;
+ copy.imageOffset.y = 0;
+ copy.imageOffset.z = 0;
+ copy.imageExtent.width = params.GetMipWidth(level);
+ copy.imageExtent.height = params.GetMipHeight(level);
+ copy.imageExtent.depth =
+ params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
+ return copy;
}
-vk::ImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
+VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
return {image->GetAspectMask(), 0, params.num_levels, 0,
static_cast<u32>(params.GetNumLayers())};
}
@@ -322,12 +351,12 @@ CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surf
aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level},
num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target)
- : vk::ImageViewType{}} {}
+ : VK_IMAGE_VIEW_TYPE_1D} {}
CachedSurfaceView::~CachedSurfaceView() = default;
-vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
- SwizzleSource z_source, SwizzleSource w_source) {
+VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
+ SwizzleSource z_source, SwizzleSource w_source) {
const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
if (last_image_view && last_swizzle == swizzle) {
return last_image_view;
@@ -352,37 +381,45 @@ vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource
// Games can sample depth or stencil values on textures. This is decided by the swizzle value on
// hardware. To emulate this on Vulkan we specify it in the aspect.
- vk::ImageAspectFlags aspect = aspect_mask;
- if (aspect == (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) {
+ VkImageAspectFlags aspect = aspect_mask;
+ if (aspect == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
const bool is_first = x_source == SwizzleSource::R;
switch (params.pixel_format) {
case VideoCore::Surface::PixelFormat::Z24S8:
case VideoCore::Surface::PixelFormat::Z32FS8:
- aspect = is_first ? vk::ImageAspectFlagBits::eDepth : vk::ImageAspectFlagBits::eStencil;
+ aspect = is_first ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_STENCIL_BIT;
break;
case VideoCore::Surface::PixelFormat::S8Z24:
- aspect = is_first ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits::eDepth;
+ aspect = is_first ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
break;
default:
- aspect = vk::ImageAspectFlagBits::eDepth;
+ aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
UNIMPLEMENTED();
}
// Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity
- swizzle_x = vk::ComponentSwizzle::eR;
- swizzle_y = vk::ComponentSwizzle::eG;
- swizzle_z = vk::ComponentSwizzle::eB;
- swizzle_w = vk::ComponentSwizzle::eA;
+ swizzle_x = VK_COMPONENT_SWIZZLE_R;
+ swizzle_y = VK_COMPONENT_SWIZZLE_G;
+ swizzle_z = VK_COMPONENT_SWIZZLE_B;
+ swizzle_w = VK_COMPONENT_SWIZZLE_A;
}
- const vk::ImageViewCreateInfo image_view_ci(
- {}, surface.GetImageHandle(), image_view_type, surface.GetImage().GetFormat(),
- {swizzle_x, swizzle_y, swizzle_z, swizzle_w},
- {aspect, base_level, num_levels, base_layer, num_layers});
+ VkImageViewCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ci.pNext = nullptr;
+ ci.flags = 0;
+ ci.image = surface.GetImageHandle();
+ ci.viewType = image_view_type;
+ ci.format = surface.GetImage().GetFormat();
+ ci.components = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
+ ci.subresourceRange.aspectMask = aspect;
+ ci.subresourceRange.baseMipLevel = base_level;
+ ci.subresourceRange.levelCount = num_levels;
+ ci.subresourceRange.baseArrayLayer = base_layer;
+ ci.subresourceRange.layerCount = num_layers;
-    const auto dev = device.GetLogical();
-    image_view = dev.createImageViewUnique(image_view_ci, nullptr, device.GetDispatchLoader());
+    image_view = device.GetLogical().CreateImageView(ci);
return last_image_view = *image_view;
}
@@ -390,8 +427,9 @@ VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterf
const VKDevice& device, VKResourceManager& resource_manager,
VKMemoryManager& memory_manager, VKScheduler& scheduler,
VKStagingBufferPool& staging_pool)
- : TextureCache(system, rasterizer), device{device}, resource_manager{resource_manager},
- memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {}
+ : TextureCache(system, rasterizer, device.IsOptimalAstcSupported()), device{device},
+ resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
+ staging_pool{staging_pool} {}
VKTextureCache::~VKTextureCache() = default;
@@ -418,25 +456,36 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
scheduler.RequestOutsideRenderPassOperationContext();
src_surface->Transition(copy_params.source_z, copy_params.depth, copy_params.source_level, 1,
- vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead,
- vk::ImageLayout::eTransferSrcOptimal);
- dst_surface->Transition(
- dst_base_layer, num_layers, copy_params.dest_level, 1, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferWrite, vk::ImageLayout::eTransferDstOptimal);
-
- const vk::ImageSubresourceLayers src_subresource(
- src_surface->GetAspectMask(), copy_params.source_level, copy_params.source_z, num_layers);
- const vk::ImageSubresourceLayers dst_subresource(
- dst_surface->GetAspectMask(), copy_params.dest_level, dst_base_layer, num_layers);
- const vk::Offset3D src_offset(copy_params.source_x, copy_params.source_y, 0);
- const vk::Offset3D dst_offset(copy_params.dest_x, copy_params.dest_y, dst_offset_z);
- const vk::Extent3D extent(copy_params.width, copy_params.height, extent_z);
- const vk::ImageCopy copy(src_subresource, src_offset, dst_subresource, dst_offset, extent);
- const vk::Image src_image = src_surface->GetImageHandle();
- const vk::Image dst_image = dst_surface->GetImageHandle();
- scheduler.Record([src_image, dst_image, copy](auto cmdbuf, auto& dld) {
- cmdbuf.copyImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
- vk::ImageLayout::eTransferDstOptimal, {copy}, dld);
+ VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ dst_surface->Transition(dst_base_layer, num_layers, copy_params.dest_level, 1,
+ VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
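+    // For layered targets Z selects the base array layer; only 3D copies use dstOffset.z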
+ VkImageCopy copy;
+ copy.srcSubresource.aspectMask = src_surface->GetAspectMask();
+ copy.srcSubresource.mipLevel = copy_params.source_level;
+ copy.srcSubresource.baseArrayLayer = copy_params.source_z;
+ copy.srcSubresource.layerCount = num_layers;
+ copy.srcOffset.x = copy_params.source_x;
+ copy.srcOffset.y = copy_params.source_y;
+ copy.srcOffset.z = 0;
+ copy.dstSubresource.aspectMask = dst_surface->GetAspectMask();
+ copy.dstSubresource.mipLevel = copy_params.dest_level;
+ copy.dstSubresource.baseArrayLayer = dst_base_layer;
+ copy.dstSubresource.layerCount = num_layers;
+ copy.dstOffset.x = copy_params.dest_x;
+ copy.dstOffset.y = copy_params.dest_y;
+ copy.dstOffset.z = dst_offset_z;
+ copy.extent.width = copy_params.width;
+ copy.extent.height = copy_params.height;
+ copy.extent.depth = extent_z;
+
+ const VkImage src_image = src_surface->GetImageHandle();
+ const VkImage dst_image = dst_surface->GetImageHandle();
+ scheduler.Record([src_image, dst_image, copy](vk::CommandBuffer cmdbuf) {
+ cmdbuf.CopyImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
});
}
@@ -445,25 +494,34 @@ void VKTextureCache::ImageBlit(View& src_view, View& dst_view,
// We can't blit inside a renderpass
scheduler.RequestOutsideRenderPassOperationContext();
- src_view->Transition(vk::ImageLayout::eTransferSrcOptimal, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferRead);
- dst_view->Transition(vk::ImageLayout::eTransferDstOptimal, vk::PipelineStageFlagBits::eTransfer,
- vk::AccessFlagBits::eTransferWrite);
-
- const auto& cfg = copy_config;
- const auto src_top_left = vk::Offset3D(cfg.src_rect.left, cfg.src_rect.top, 0);
- const auto src_bot_right = vk::Offset3D(cfg.src_rect.right, cfg.src_rect.bottom, 1);
- const auto dst_top_left = vk::Offset3D(cfg.dst_rect.left, cfg.dst_rect.top, 0);
- const auto dst_bot_right = vk::Offset3D(cfg.dst_rect.right, cfg.dst_rect.bottom, 1);
- const vk::ImageBlit blit(src_view->GetImageSubresourceLayers(), {src_top_left, src_bot_right},
- dst_view->GetImageSubresourceLayers(), {dst_top_left, dst_bot_right});
+ src_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT);
+ dst_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT);
+
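+    // Blit rectangles come from the 2D engine copy configuration; depth spans one slice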
+ VkImageBlit blit;
+ blit.srcSubresource = src_view->GetImageSubresourceLayers();
+ blit.srcOffsets[0].x = copy_config.src_rect.left;
+ blit.srcOffsets[0].y = copy_config.src_rect.top;
+ blit.srcOffsets[0].z = 0;
+ blit.srcOffsets[1].x = copy_config.src_rect.right;
+ blit.srcOffsets[1].y = copy_config.src_rect.bottom;
+ blit.srcOffsets[1].z = 1;
+ blit.dstSubresource = dst_view->GetImageSubresourceLayers();
+ blit.dstOffsets[0].x = copy_config.dst_rect.left;
+ blit.dstOffsets[0].y = copy_config.dst_rect.top;
+ blit.dstOffsets[0].z = 0;
+ blit.dstOffsets[1].x = copy_config.dst_rect.right;
+ blit.dstOffsets[1].y = copy_config.dst_rect.bottom;
+ blit.dstOffsets[1].z = 1;
+
const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit,
- is_linear](auto cmdbuf, auto& dld) {
- cmdbuf.blitImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image,
- vk::ImageLayout::eTransferDstOptimal, {blit},
- is_linear ? vk::Filter::eLinear : vk::Filter::eNearest, dld);
+ is_linear](vk::CommandBuffer cmdbuf) {
+ cmdbuf.BlitImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, blit,
+ is_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
});
}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 22e3d34de..115595f28 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -13,10 +13,10 @@
#include "common/math_util.h"
#include "video_core/gpu.h"
#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_image.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/wrapper.h"
#include "video_core/texture_cache/surface_base.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/textures/decoders.h"
@@ -60,15 +60,15 @@ public:
void UploadTexture(const std::vector<u8>& staging_buffer) override;
void DownloadTexture(std::vector<u8>& staging_buffer) override;
- void FullTransition(vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
- vk::ImageLayout new_layout) {
+ void FullTransition(VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+ VkImageLayout new_layout) {
image->Transition(0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels,
new_stage_mask, new_access, new_layout);
}
void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
- vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access,
- vk::ImageLayout new_layout) {
+ VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
+ VkImageLayout new_layout) {
image->Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
new_access, new_layout);
}
@@ -81,15 +81,15 @@ public:
return *image;
}
- vk::Image GetImageHandle() const {
- return image->GetHandle();
+ VkImage GetImageHandle() const {
+ return *image->GetHandle();
}
- vk::ImageAspectFlags GetAspectMask() const {
+ VkImageAspectFlags GetAspectMask() const {
return image->GetAspectMask();
}
- vk::BufferView GetBufferViewHandle() const {
+ VkBufferView GetBufferViewHandle() const {
return *buffer_view;
}
@@ -104,9 +104,9 @@ private:
void UploadImage(const std::vector<u8>& staging_buffer);
- vk::BufferImageCopy GetBufferImageCopy(u32 level) const;
+ VkBufferImageCopy GetBufferImageCopy(u32 level) const;
- vk::ImageSubresourceRange GetImageSubresourceRange() const;
+ VkImageSubresourceRange GetImageSubresourceRange() const;
Core::System& system;
const VKDevice& device;
@@ -116,11 +116,11 @@ private:
VKStagingBufferPool& staging_pool;
std::optional<VKImage> image;
- UniqueBuffer buffer;
- UniqueBufferView buffer_view;
+ vk::Buffer buffer;
+ vk::BufferView buffer_view;
VKMemoryCommit commit;
- vk::Format format;
+ VkFormat format = VK_FORMAT_UNDEFINED;
};
class CachedSurfaceView final : public VideoCommon::ViewBase {
@@ -129,16 +129,16 @@ public:
const ViewParams& params, bool is_proxy);
~CachedSurfaceView();
- vk::ImageView GetHandle(Tegra::Texture::SwizzleSource x_source,
- Tegra::Texture::SwizzleSource y_source,
- Tegra::Texture::SwizzleSource z_source,
- Tegra::Texture::SwizzleSource w_source);
+ VkImageView GetHandle(Tegra::Texture::SwizzleSource x_source,
+ Tegra::Texture::SwizzleSource y_source,
+ Tegra::Texture::SwizzleSource z_source,
+ Tegra::Texture::SwizzleSource w_source);
bool IsSameSurface(const CachedSurfaceView& rhs) const {
return &surface == &rhs.surface;
}
- vk::ImageView GetHandle() {
+ VkImageView GetHandle() {
return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G,
Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A);
}
@@ -159,24 +159,24 @@ public:
return buffer_view;
}
- vk::Image GetImage() const {
+ VkImage GetImage() const {
return image;
}
- vk::BufferView GetBufferView() const {
+ VkBufferView GetBufferView() const {
return buffer_view;
}
- vk::ImageSubresourceRange GetImageSubresourceRange() const {
+ VkImageSubresourceRange GetImageSubresourceRange() const {
return {aspect_mask, base_level, num_levels, base_layer, num_layers};
}
- vk::ImageSubresourceLayers GetImageSubresourceLayers() const {
+ VkImageSubresourceLayers GetImageSubresourceLayers() const {
return {surface.GetAspectMask(), base_level, base_layer, num_layers};
}
- void Transition(vk::ImageLayout new_layout, vk::PipelineStageFlags new_stage_mask,
- vk::AccessFlags new_access) const {
+ void Transition(VkImageLayout new_layout, VkPipelineStageFlags new_stage_mask,
+ VkAccessFlags new_access) const {
surface.Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
new_access, new_layout);
}
@@ -196,9 +196,9 @@ private:
// Store a copy of these values to avoid double dereference when reading them
const SurfaceParams params;
- const vk::Image image;
- const vk::BufferView buffer_view;
- const vk::ImageAspectFlags aspect_mask;
+ const VkImage image;
+ const VkBufferView buffer_view;
+ const VkImageAspectFlags aspect_mask;
const VKDevice& device;
CachedSurface& surface;
@@ -206,12 +206,12 @@ private:
const u32 num_layers;
const u32 base_level;
const u32 num_levels;
- const vk::ImageViewType image_view_type;
+ const VkImageViewType image_view_type;
- vk::ImageView last_image_view;
- u32 last_swizzle{};
+ VkImageView last_image_view = nullptr;
+ u32 last_swizzle = 0;
- std::unordered_map<u32, UniqueImageView> view_cache;
+ std::unordered_map<u32, vk::ImageView> view_cache;
};
class VKTextureCache final : public TextureCacheBase {
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 0e577b9ff..4bfec0077 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -7,10 +7,10 @@
#include "common/assert.h"
#include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -27,8 +27,8 @@ void VKUpdateDescriptorQueue::Acquire() {
entries.clear();
}
-void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template,
- vk::DescriptorSet set) {
+void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
+ VkDescriptorSet set) {
if (payload.size() + entries.size() >= payload.max_size()) {
LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread");
scheduler.WaitWorker();
@@ -37,21 +37,21 @@ void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template,
const auto payload_start = payload.data() + payload.size();
for (const auto& entry : entries) {
- if (const auto image = std::get_if<vk::DescriptorImageInfo>(&entry)) {
+ if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
payload.push_back(*image);
} else if (const auto buffer = std::get_if<Buffer>(&entry)) {
payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
- } else if (const auto texel = std::get_if<vk::BufferView>(&entry)) {
+ } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
payload.push_back(*texel);
} else {
UNREACHABLE();
}
}
- scheduler.Record([dev = device.GetLogical(), payload_start, set,
- update_template]([[maybe_unused]] auto cmdbuf, auto& dld) {
- dev.updateDescriptorSetWithTemplate(set, update_template, payload_start, dld);
- });
+ scheduler.Record(
+ [payload_start, set, update_template, logical = &device.GetLogical()](vk::CommandBuffer) {
+ logical->UpdateDescriptorSet(set, update_template, payload_start);
+ });
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 8c825aa29..a9e3d5dba 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -9,7 +9,7 @@
#include <boost/container/static_vector.hpp>
#include "common/common_types.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan {
@@ -20,18 +20,18 @@ class DescriptorUpdateEntry {
public:
explicit DescriptorUpdateEntry() : image{} {}
- DescriptorUpdateEntry(vk::DescriptorImageInfo image) : image{image} {}
+ DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
- DescriptorUpdateEntry(vk::Buffer buffer, vk::DeviceSize offset, vk::DeviceSize size)
+ DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
: buffer{buffer, offset, size} {}
- DescriptorUpdateEntry(vk::BufferView texel_buffer) : texel_buffer{texel_buffer} {}
+ DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
private:
union {
- vk::DescriptorImageInfo image;
- vk::DescriptorBufferInfo buffer;
- vk::BufferView texel_buffer;
+ VkDescriptorImageInfo image;
+ VkDescriptorBufferInfo buffer;
+ VkBufferView texel_buffer;
};
};
@@ -44,37 +44,35 @@ public:
void Acquire();
- void Send(vk::DescriptorUpdateTemplate update_template, vk::DescriptorSet set);
+ void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
- void AddSampledImage(vk::Sampler sampler, vk::ImageView image_view) {
- entries.emplace_back(vk::DescriptorImageInfo{sampler, image_view, {}});
+ void AddSampledImage(VkSampler sampler, VkImageView image_view) {
+ entries.emplace_back(VkDescriptorImageInfo{sampler, image_view, {}});
}
- void AddImage(vk::ImageView image_view) {
- entries.emplace_back(vk::DescriptorImageInfo{{}, image_view, {}});
+ void AddImage(VkImageView image_view) {
+ entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
}
- void AddBuffer(const vk::Buffer* buffer, u64 offset, std::size_t size) {
+ void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
entries.push_back(Buffer{buffer, offset, size});
}
- void AddTexelBuffer(vk::BufferView texel_buffer) {
+ void AddTexelBuffer(VkBufferView texel_buffer) {
entries.emplace_back(texel_buffer);
}
- vk::ImageLayout* GetLastImageLayout() {
- return &std::get<vk::DescriptorImageInfo>(entries.back()).imageLayout;
+ VkImageLayout* GetLastImageLayout() {
+ return &std::get<VkDescriptorImageInfo>(entries.back()).imageLayout;
}
private:
struct Buffer {
- const vk::Buffer* buffer{};
- u64 offset{};
- std::size_t size{};
+ const VkBuffer* buffer = nullptr;
+ u64 offset = 0;
+ std::size_t size = 0;
};
- using Variant = std::variant<vk::DescriptorImageInfo, Buffer, vk::BufferView>;
- // Old gcc versions don't consider this trivially copyable.
- // static_assert(std::is_trivially_copyable_v<Variant>);
+ using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;
const VKDevice& device;
VKScheduler& scheduler;
diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp
index 478394682..4db329fa5 100644
--- a/src/video_core/shader/decode/arithmetic.cpp
+++ b/src/video_core/shader/decode/arithmetic.cpp
@@ -136,7 +136,8 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) {
SetRegister(bb, instr.gpr0, value);
break;
}
- case OpCode::Id::FCMP_R: {
+ case OpCode::Id::FCMP_RR:
+ case OpCode::Id::FCMP_RC: {
UNIMPLEMENTED_IF(instr.fcmp.ftz == 0);
Node op_c = GetRegister(instr.gpr39);
Node comp = GetPredicateComparisonFloat(instr.fcmp.cond, std::move(op_c), Immediate(0.0f));
diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp
index c72690b2b..b9989c88c 100644
--- a/src/video_core/shader/decode/conversion.cpp
+++ b/src/video_core/shader/decode/conversion.cpp
@@ -2,6 +2,10 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <limits>
+#include <optional>
+#include <utility>
+
#include "common/assert.h"
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"
@@ -15,9 +19,49 @@ using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
namespace {
+
constexpr OperationCode GetFloatSelector(u64 selector) {
return selector == 0 ? OperationCode::FCastHalf0 : OperationCode::FCastHalf1;
}
+
+constexpr u32 SizeInBits(Register::Size size) {
+ switch (size) {
+ case Register::Size::Byte:
+ return 8;
+ case Register::Size::Short:
+ return 16;
+ case Register::Size::Word:
+ return 32;
+ case Register::Size::Long:
+ return 64;
+ }
+ return 0;
+}
+
+constexpr std::optional<std::pair<s32, s32>> IntegerSaturateBounds(Register::Size src_size,
+ Register::Size dst_size,
+ bool src_signed,
+ bool dst_signed) {
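+    // Returns std::nullopt when saturation is a no-op (word to word with equal signedness)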
+ const u32 dst_bits = SizeInBits(dst_size);
+ if (src_size == Register::Size::Word && dst_size == Register::Size::Word) {
+ if (src_signed == dst_signed) {
+ return std::nullopt;
+ }
+ return std::make_pair(0, std::numeric_limits<s32>::max());
+ }
+ if (dst_signed) {
+ // Signed destination, clamp to [-128, 127] for instance
+ return std::make_pair(-(1 << (dst_bits - 1)), (1 << (dst_bits - 1)) - 1);
+ } else {
+ // Unsigned destination
+ if (dst_bits == 32) {
+            // Avoid shifting by 32, which is undefined behavior
+ return std::make_pair(0, s32(std::numeric_limits<u32>::max()));
+ }
+ return std::make_pair(0, (1 << dst_bits) - 1);
+ }
+}
+
} // Anonymous namespace
u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
@@ -28,14 +72,13 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
case OpCode::Id::I2I_R:
case OpCode::Id::I2I_C:
case OpCode::Id::I2I_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0);
- UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word);
- UNIMPLEMENTED_IF(instr.alu.saturate_d);
+ const bool src_signed = instr.conversion.is_input_signed;
+ const bool dst_signed = instr.conversion.is_output_signed;
+ const Register::Size src_size = instr.conversion.src_size;
+ const Register::Size dst_size = instr.conversion.dst_size;
+ const u32 selector = static_cast<u32>(instr.conversion.int_src.selector);
- const bool input_signed = instr.conversion.is_input_signed;
- const bool output_signed = instr.conversion.is_output_signed;
-
- Node value = [&]() {
+ Node value = [this, instr, opcode] {
switch (opcode->get().GetId()) {
case OpCode::Id::I2I_R:
return GetRegister(instr.gpr20);
@@ -48,16 +91,60 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
return Immediate(0);
}
}();
- value = ConvertIntegerSize(value, instr.conversion.src_size, input_signed);
- value = GetOperandAbsNegInteger(value, instr.conversion.abs_a, instr.conversion.negate_a,
- input_signed);
- if (input_signed != output_signed) {
- value = SignedOperation(OperationCode::ICastUnsigned, output_signed, NO_PRECISE, value);
+ // Ensure the source selector is valid
+ switch (instr.conversion.src_size) {
+ case Register::Size::Byte:
+ break;
+ case Register::Size::Short:
+ ASSERT(selector == 0 || selector == 2);
+ break;
+ default:
+ ASSERT(selector == 0);
+ break;
+ }
+
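+        // Sub-word sources, or sources read through a selector, are extracted with a
+        // sign- or zero-extending bitfield extract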
+ if (src_size != Register::Size::Word || selector != 0) {
+ value = SignedOperation(OperationCode::IBitfieldExtract, src_signed, std::move(value),
+ Immediate(selector * 8), Immediate(SizeInBits(src_size)));
+ }
+
+ value = GetOperandAbsNegInteger(std::move(value), instr.conversion.abs_a,
+ instr.conversion.negate_a, src_signed);
+
+ if (instr.alu.saturate_d) {
+ if (src_signed && !dst_signed) {
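+                // Clamp negative values to zero before applying the unsigned upper bound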
+ Node is_negative = Operation(OperationCode::LogicalUGreaterEqual, value,
+ Immediate(1 << (SizeInBits(src_size) - 1)));
+ value = Operation(OperationCode::Select, std::move(is_negative), Immediate(0),
+ std::move(value));
+
+                // Simplify the generated expressions; this can be removed without semantic impact
+ SetTemporary(bb, 0, std::move(value));
+ value = GetTemporary(0);
+
+ if (dst_size != Register::Size::Word) {
+ const Node limit = Immediate((1 << SizeInBits(dst_size)) - 1);
+ Node is_large =
+ Operation(OperationCode::LogicalUGreaterThan, std::move(value), limit);
+ value = Operation(OperationCode::Select, std::move(is_large), limit,
+ std::move(value));
+ }
+ } else if (const std::optional bounds =
+ IntegerSaturateBounds(src_size, dst_size, src_signed, dst_signed)) {
+ value = SignedOperation(OperationCode::IMax, src_signed, std::move(value),
+ Immediate(bounds->first));
+ value = SignedOperation(OperationCode::IMin, src_signed, std::move(value),
+ Immediate(bounds->second));
+ }
+ } else if (dst_size != Register::Size::Word) {
+ // No saturation, we only have to mask the result
+ Node mask = Immediate((1 << SizeInBits(dst_size)) - 1);
+ value = Operation(OperationCode::UBitwiseAnd, std::move(value), std::move(mask));
}
SetInternalFlagsFromInteger(bb, value, instr.generates_cc);
- SetRegister(bb, instr.gpr0, value);
+ SetRegister(bb, instr.gpr0, std::move(value));
break;
}
case OpCode::Id::I2F_R:
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index d2fe4ec5d..0dd7a1196 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -13,13 +13,247 @@
#include "video_core/engines/shader_bytecode.h"
#include "video_core/shader/node_helper.h"
#include "video_core/shader/shader_ir.h"
+#include "video_core/textures/texture.h"
namespace VideoCommon::Shader {
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
+using Tegra::Shader::PredCondition;
+using Tegra::Shader::StoreType;
+using Tegra::Texture::ComponentType;
+using Tegra::Texture::TextureFormat;
+using Tegra::Texture::TICEntry;
namespace {
+
+ComponentType GetComponentType(Tegra::Engines::SamplerDescriptor descriptor,
+ std::size_t component) {
+ const TextureFormat format{descriptor.format};
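+    // Component order follows the format name, so map the index to the r/g/b/a type fields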
+ switch (format) {
+ case TextureFormat::R16_G16_B16_A16:
+ case TextureFormat::R32_G32_B32_A32:
+ case TextureFormat::R32_G32_B32:
+ case TextureFormat::R32_G32:
+ case TextureFormat::R16_G16:
+ case TextureFormat::R32:
+ case TextureFormat::R16:
+ case TextureFormat::R8:
+ case TextureFormat::R1:
+ if (component == 0) {
+ return descriptor.r_type;
+ }
+ if (component == 1) {
+ return descriptor.g_type;
+ }
+ if (component == 2) {
+ return descriptor.b_type;
+ }
+ if (component == 3) {
+ return descriptor.a_type;
+ }
+ break;
+ case TextureFormat::A8R8G8B8:
+ if (component == 0) {
+ return descriptor.a_type;
+ }
+ if (component == 1) {
+ return descriptor.r_type;
+ }
+ if (component == 2) {
+ return descriptor.g_type;
+ }
+ if (component == 3) {
+ return descriptor.b_type;
+ }
+ break;
+ case TextureFormat::A2B10G10R10:
+ case TextureFormat::A4B4G4R4:
+ case TextureFormat::A5B5G5R1:
+ case TextureFormat::A1B5G5R5:
+ if (component == 0) {
+ return descriptor.a_type;
+ }
+ if (component == 1) {
+ return descriptor.b_type;
+ }
+ if (component == 2) {
+ return descriptor.g_type;
+ }
+ if (component == 3) {
+ return descriptor.r_type;
+ }
+ break;
+ case TextureFormat::R32_B24G8:
+ if (component == 0) {
+ return descriptor.r_type;
+ }
+ if (component == 1) {
+ return descriptor.b_type;
+ }
+ if (component == 2) {
+ return descriptor.g_type;
+ }
+ break;
+ case TextureFormat::B5G6R5:
+ case TextureFormat::B6G5R5:
+ if (component == 0) {
+ return descriptor.b_type;
+ }
+ if (component == 1) {
+ return descriptor.g_type;
+ }
+ if (component == 2) {
+ return descriptor.r_type;
+ }
+ break;
+ case TextureFormat::G8R24:
+ case TextureFormat::G24R8:
+ case TextureFormat::G8R8:
+ case TextureFormat::G4R4:
+ if (component == 0) {
+ return descriptor.g_type;
+ }
+ if (component == 1) {
+ return descriptor.r_type;
+ }
+ break;
+ }
+ UNIMPLEMENTED_MSG("texture format not implement={}", format);
+ return ComponentType::FLOAT;
+}
+
+bool IsComponentEnabled(std::size_t component_mask, std::size_t component) {
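+    // The table is the identity mapping of the 4-bit mask; it spells out which of the
+    // R/G/B/A channels each mask value enables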
+ constexpr u8 R = 0b0001;
+ constexpr u8 G = 0b0010;
+ constexpr u8 B = 0b0100;
+ constexpr u8 A = 0b1000;
+ constexpr std::array<u8, 16> mask = {
+ 0, (R), (G), (R | G), (B), (R | B), (G | B), (R | G | B),
+ (A), (R | A), (G | A), (R | G | A), (B | A), (R | B | A), (G | B | A), (R | G | B | A)};
+ return std::bitset<4>{mask.at(component_mask)}.test(component);
+}
+
+u32 GetComponentSize(TextureFormat format, std::size_t component) {
+ switch (format) {
+ case TextureFormat::R32_G32_B32_A32:
+ return 32;
+ case TextureFormat::R16_G16_B16_A16:
+ return 16;
+ case TextureFormat::R32_G32_B32:
+ return component <= 2 ? 32 : 0;
+ case TextureFormat::R32_G32:
+ return component <= 1 ? 32 : 0;
+ case TextureFormat::R16_G16:
+ return component <= 1 ? 16 : 0;
+ case TextureFormat::R32:
+ return component == 0 ? 32 : 0;
+ case TextureFormat::R16:
+ return component == 0 ? 16 : 0;
+ case TextureFormat::R8:
+ return component == 0 ? 8 : 0;
+ case TextureFormat::R1:
+ return component == 0 ? 1 : 0;
+ case TextureFormat::A8R8G8B8:
+ return 8;
+ case TextureFormat::A2B10G10R10:
+ return (component == 3 || component == 2 || component == 1) ? 10 : 2;
+ case TextureFormat::A4B4G4R4:
+ return 4;
+ case TextureFormat::A5B5G5R1:
+ return (component == 0 || component == 1 || component == 2) ? 5 : 1;
+ case TextureFormat::A1B5G5R5:
+ return (component == 1 || component == 2 || component == 3) ? 5 : 1;
+ case TextureFormat::R32_B24G8:
+ if (component == 0) {
+ return 32;
+ }
+ if (component == 1) {
+ return 24;
+ }
+ if (component == 2) {
+ return 8;
+ }
+ return 0;
+ case TextureFormat::B5G6R5:
+ if (component == 0 || component == 2) {
+ return 5;
+ }
+ if (component == 1) {
+ return 6;
+ }
+ return 0;
+ case TextureFormat::B6G5R5:
+ if (component == 1 || component == 2) {
+ return 5;
+ }
+ if (component == 0) {
+ return 6;
+ }
+ return 0;
+ case TextureFormat::G8R24:
+ if (component == 0) {
+ return 8;
+ }
+ if (component == 1) {
+ return 24;
+ }
+ return 0;
+ case TextureFormat::G24R8:
+ if (component == 0) {
+ return 8;
+ }
+ if (component == 1) {
+ return 24;
+ }
+ return 0;
+ case TextureFormat::G8R8:
+ return (component == 0 || component == 1) ? 8 : 0;
+ case TextureFormat::G4R4:
+ return (component == 0 || component == 1) ? 4 : 0;
+ default:
+ UNIMPLEMENTED_MSG("texture format not implement={}", format);
+ return 0;
+ }
+}
+
+std::size_t GetImageComponentMask(TextureFormat format) {
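+    // Mask of the channels the format defines, encoded with the same R/G/B/A bits used by
+    // IsComponentEnabled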
+ constexpr u8 R = 0b0001;
+ constexpr u8 G = 0b0010;
+ constexpr u8 B = 0b0100;
+ constexpr u8 A = 0b1000;
+ switch (format) {
+ case TextureFormat::R32_G32_B32_A32:
+ case TextureFormat::R16_G16_B16_A16:
+ case TextureFormat::A8R8G8B8:
+ case TextureFormat::A2B10G10R10:
+ case TextureFormat::A4B4G4R4:
+ case TextureFormat::A5B5G5R1:
+ case TextureFormat::A1B5G5R5:
+ return std::size_t{R | G | B | A};
+ case TextureFormat::R32_G32_B32:
+ case TextureFormat::R32_B24G8:
+ case TextureFormat::B5G6R5:
+ case TextureFormat::B6G5R5:
+ return std::size_t{R | G | B};
+ case TextureFormat::R32_G32:
+ case TextureFormat::R16_G16:
+ case TextureFormat::G8R24:
+ case TextureFormat::G24R8:
+ case TextureFormat::G8R8:
+ case TextureFormat::G4R4:
+ return std::size_t{R | G};
+ case TextureFormat::R32:
+ case TextureFormat::R16:
+ case TextureFormat::R8:
+ case TextureFormat::R1:
+ return std::size_t{R};
+ default:
+ UNIMPLEMENTED_MSG("texture format not implement={}", format);
+ return std::size_t{R | G | B | A};
+ }
+}
+
std::size_t GetImageTypeNumCoordinates(Tegra::Shader::ImageType image_type) {
switch (image_type) {
case Tegra::Shader::ImageType::Texture1D:
@@ -37,6 +271,39 @@ std::size_t GetImageTypeNumCoordinates(Tegra::Shader::ImageType image_type) {
}
} // Anonymous namespace
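+// Converts a raw image load result into the component's typed value; the returned boolean
+// reports whether the produced value is signed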
+std::pair<Node, bool> ShaderIR::GetComponentValue(ComponentType component_type, u32 component_size,
+ Node original_value) {
+ switch (component_type) {
+ case ComponentType::SNORM: {
+        // Map the normalized value in [-1.0, 1.0] back to a component_size-bit signed integer
+ auto cnv_value = Operation(OperationCode::FMul, original_value,
+ Immediate(static_cast<float>(1 << component_size) / 2.f - 1.f));
+ cnv_value = Operation(OperationCode::ICastFloat, std::move(cnv_value));
+ return {BitfieldExtract(std::move(cnv_value), 0, component_size), true};
+ }
+ case ComponentType::SINT:
+ case ComponentType::UNORM: {
+ bool is_signed = component_type == ComponentType::SINT;
+ // range [0.0, 1.0]
+ auto cnv_value = Operation(OperationCode::FMul, original_value,
+ Immediate(static_cast<float>(1 << component_size) - 1.f));
+ return {SignedOperation(OperationCode::ICastFloat, is_signed, std::move(cnv_value)),
+ is_signed};
+ }
+ case ComponentType::UINT: // range [0, (1 << component_size) - 1]
+ return {std::move(original_value), false};
+ case ComponentType::FLOAT:
+ if (component_size == 16) {
+ return {Operation(OperationCode::HCastFloat, original_value), true};
+ } else {
+ return {std::move(original_value), true};
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unimplement component type={}", component_type);
+ return {std::move(original_value), true};
+ }
+}
+
u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
@@ -53,7 +320,6 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
switch (opcode->get().GetId()) {
case OpCode::Id::SULD: {
- UNIMPLEMENTED_IF(instr.suldst.mode != Tegra::Shader::SurfaceDataMode::P);
UNIMPLEMENTED_IF(instr.suldst.out_of_bounds_store !=
Tegra::Shader::OutOfBoundsStore::Ignore);
@@ -62,17 +328,89 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
: GetBindlessImage(instr.gpr39, type)};
image.MarkRead();
- u32 indexer = 0;
- for (u32 element = 0; element < 4; ++element) {
- if (!instr.suldst.IsComponentEnabled(element)) {
- continue;
+ if (instr.suldst.mode == Tegra::Shader::SurfaceDataMode::P) {
+ u32 indexer = 0;
+ for (u32 element = 0; element < 4; ++element) {
+ if (!instr.suldst.IsComponentEnabled(element)) {
+ continue;
+ }
+ MetaImage meta{image, {}, element};
+ Node value = Operation(OperationCode::ImageLoad, meta, GetCoordinates(type));
+ SetTemporary(bb, indexer++, std::move(value));
+ }
+ for (u32 i = 0; i < indexer; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
+ }
+ } else if (instr.suldst.mode == Tegra::Shader::SurfaceDataMode::D_BA) {
+ UNIMPLEMENTED_IF(instr.suldst.GetStoreDataLayout() != StoreType::Bits32 &&
+ instr.suldst.GetStoreDataLayout() != StoreType::Bits64);
+
+ auto descriptor = [this, instr] {
+ std::optional<Tegra::Engines::SamplerDescriptor> descriptor;
+ if (instr.suldst.is_immediate) {
+ descriptor =
+ registry.ObtainBoundSampler(static_cast<u32>(instr.image.index.Value()));
+ } else {
+ const Node image_register = GetRegister(instr.gpr39);
+ const auto [base_image, buffer, offset] = TrackCbuf(
+ image_register, global_code, static_cast<s64>(global_code.size()));
+ descriptor = registry.ObtainBindlessSampler(buffer, offset);
+ }
+ if (!descriptor) {
+ UNREACHABLE_MSG("Failed to obtain image descriptor");
+ }
+ return *descriptor;
+ }();
+
+ const auto comp_mask = GetImageComponentMask(descriptor.format);
+
+ switch (instr.suldst.GetStoreDataLayout()) {
+ case StoreType::Bits32:
+ case StoreType::Bits64: {
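+                // Pack the enabled components into consecutive 32-bit words, flushing to a
+                // temporary register each time a word fills up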
+ u32 indexer = 0;
+ u32 shifted_counter = 0;
+ Node value = Immediate(0);
+ for (u32 element = 0; element < 4; ++element) {
+ if (!IsComponentEnabled(comp_mask, element)) {
+ continue;
+ }
+ const auto component_type = GetComponentType(descriptor, element);
+ const auto component_size = GetComponentSize(descriptor.format, element);
+ MetaImage meta{image, {}, element};
+
+ auto [converted_value, is_signed] = GetComponentValue(
+ component_type, component_size,
+ Operation(OperationCode::ImageLoad, meta, GetCoordinates(type)));
+
+                    // Shift the element into its position within the packed word
+ const auto shifted = shifted_counter;
+ if (shifted > 0) {
+ converted_value =
+ SignedOperation(OperationCode::ILogicalShiftLeft, is_signed,
+ std::move(converted_value), Immediate(shifted));
+ }
+ shifted_counter += component_size;
+
+                    // Merge the shifted element into the packed result
+ value = Operation(OperationCode::UBitwiseOr, value, std::move(converted_value));
+
+                    // Once a full 32-bit word has been packed, save it into a temporary
+                    if (shifted_counter >= 32) {
+                        SetTemporary(bb, indexer++, std::move(value));
+                        // Reset the counter and value to start packing the next word
+ value = Immediate(0);
+ shifted_counter = 0;
+ }
+ }
+ for (u32 i = 0; i < indexer; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- MetaImage meta{image, {}, element};
- Node value = Operation(OperationCode::ImageLoad, meta, GetCoordinates(type));
- SetTemporary(bb, indexer++, std::move(value));
- }
- for (u32 i = 0; i < indexer; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i));
}
break;
}
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index 4944e9d69..d4f95b18c 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -11,12 +11,17 @@
namespace VideoCommon::Shader {
+using std::move;
using Tegra::Shader::ConditionCode;
using Tegra::Shader::Instruction;
+using Tegra::Shader::IpaInterpMode;
using Tegra::Shader::OpCode;
+using Tegra::Shader::PixelImap;
using Tegra::Shader::Register;
using Tegra::Shader::SystemVariable;
+using Index = Tegra::Shader::Attribute::Index;
+
u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
@@ -66,18 +71,24 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
bb.push_back(Operation(OperationCode::Discard));
break;
}
- case OpCode::Id::MOV_SYS: {
+ case OpCode::Id::S2R: {
const Node value = [this, instr] {
switch (instr.sys20) {
case SystemVariable::LaneId:
- LOG_WARNING(HW_GPU, "MOV_SYS instruction with LaneId is incomplete");
+ LOG_WARNING(HW_GPU, "S2R instruction with LaneId is incomplete");
return Immediate(0U);
case SystemVariable::InvocationId:
return Operation(OperationCode::InvocationId);
case SystemVariable::Ydirection:
return Operation(OperationCode::YNegate);
case SystemVariable::InvocationInfo:
- LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete");
+ LOG_WARNING(HW_GPU, "S2R instruction with InvocationInfo is incomplete");
+ return Immediate(0U);
+ case SystemVariable::WscaleFactorXY:
+ UNIMPLEMENTED_MSG("S2R WscaleFactorXY is not implemented");
+ return Immediate(0U);
+ case SystemVariable::WscaleFactorZ:
+ UNIMPLEMENTED_MSG("S2R WscaleFactorZ is not implemented");
return Immediate(0U);
case SystemVariable::Tid: {
Node value = Immediate(0);
@@ -213,27 +224,28 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
}
case OpCode::Id::IPA: {
const bool is_physical = instr.ipa.idx && instr.gpr8.Value() != 0xff;
-
const auto attribute = instr.attribute.fmt28;
- const Tegra::Shader::IpaMode input_mode{instr.ipa.interp_mode.Value(),
- instr.ipa.sample_mode.Value()};
+ const Index index = attribute.index;
Node value = is_physical ? GetPhysicalInputAttribute(instr.gpr8)
- : GetInputAttribute(attribute.index, attribute.element);
- const Tegra::Shader::Attribute::Index index = attribute.index.Value();
- const bool is_generic = index >= Tegra::Shader::Attribute::Index::Attribute_0 &&
- index <= Tegra::Shader::Attribute::Index::Attribute_31;
- if (is_generic || is_physical) {
- // TODO(Blinkhawk): There are cases where a perspective attribute use PASS.
- // In theory by setting them as perspective, OpenGL does the perspective correction.
- // A way must figured to reverse the last step of it.
- if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) {
- value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20));
+ : GetInputAttribute(index, attribute.element);
+
+ // Code taken from Ryujinx.
+ if (index >= Index::Attribute_0 && index <= Index::Attribute_31) {
+ const u32 location = static_cast<u32>(index) - static_cast<u32>(Index::Attribute_0);
+ if (header.ps.GetPixelImap(location) == PixelImap::Perspective) {
+ Node position_w = GetInputAttribute(Index::Position, 3);
+ value = Operation(OperationCode::FMul, move(value), move(position_w));
}
}
- value = GetSaturatedFloat(value, instr.ipa.saturate);
- SetRegister(bb, instr.gpr0, value);
+ if (instr.ipa.interp_mode == IpaInterpMode::Multiply) {
+ value = Operation(OperationCode::FMul, move(value), GetRegister(instr.gpr20));
+ }
+
+ value = GetSaturatedFloat(move(value), instr.ipa.saturate);
+
+ SetRegister(bb, instr.gpr0, move(value));
break;
}
case OpCode::Id::OUT_R: {
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 48350e042..6c4a1358b 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -780,20 +780,6 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
// When lod is used always is in gpr20
const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);
- // Fill empty entries from the guest sampler
- const std::size_t entry_coord_count = GetCoordCount(sampler.GetType());
- if (type_coord_count != entry_coord_count) {
- LOG_WARNING(HW_GPU, "Bound and built texture types mismatch");
-
- // When the size is higher we insert zeroes
- for (std::size_t i = type_coord_count; i < entry_coord_count; ++i) {
- coords.push_back(GetRegister(Register::ZeroIndex));
- }
-
- // Then we ensure the size matches the number of entries (dropping unused values)
- coords.resize(entry_coord_count);
- }
-
Node4 values;
for (u32 element = 0; element < values.size(); ++element) {
auto coords_copy = coords;
diff --git a/src/video_core/shader/decode/video.cpp b/src/video_core/shader/decode/video.cpp
index b047cf870..64ba60ea2 100644
--- a/src/video_core/shader/decode/video.cpp
+++ b/src/video_core/shader/decode/video.cpp
@@ -10,16 +10,24 @@
namespace VideoCommon::Shader {
+using std::move;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Pred;
using Tegra::Shader::VideoType;
using Tegra::Shader::VmadShr;
+using Tegra::Shader::VmnmxOperation;
+using Tegra::Shader::VmnmxType;
u32 ShaderIR::DecodeVideo(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
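+    // VMNMX has its own operand encoding, so decode it apart from the common video path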
+ if (opcode->get().GetId() == OpCode::Id::VMNMX) {
+ DecodeVMNMX(bb, instr);
+ return pc;
+ }
+
const Node op_a =
GetVideoOperand(GetRegister(instr.gpr8), instr.video.is_byte_chunk_a, instr.video.signed_a,
instr.video.type_a, instr.video.byte_height_a);
@@ -109,4 +117,54 @@ Node ShaderIR::GetVideoOperand(Node op, bool is_chunk, bool is_signed,
}
}
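+// Decodes VMNMX: takes the minimum or maximum of the two sources, then merges,
+// accumulates or clamps the result with the third operand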
+void ShaderIR::DecodeVMNMX(NodeBlock& bb, Tegra::Shader::Instruction instr) {
+ UNIMPLEMENTED_IF(!instr.vmnmx.is_op_b_register);
+ UNIMPLEMENTED_IF(instr.vmnmx.SourceFormatA() != VmnmxType::Bits32);
+ UNIMPLEMENTED_IF(instr.vmnmx.SourceFormatB() != VmnmxType::Bits32);
+ UNIMPLEMENTED_IF(instr.vmnmx.is_src_a_signed != instr.vmnmx.is_src_b_signed);
+ UNIMPLEMENTED_IF(instr.vmnmx.sat);
+ UNIMPLEMENTED_IF(instr.generates_cc);
+
+ Node op_a = GetRegister(instr.gpr8);
+ Node op_b = GetRegister(instr.gpr20);
+ Node op_c = GetRegister(instr.gpr39);
+
+ const bool is_oper1_signed = instr.vmnmx.is_src_a_signed; // Stubbed
+ const bool is_oper2_signed = instr.vmnmx.is_dest_signed;
+
+ const auto operation_a = instr.vmnmx.mx ? OperationCode::IMax : OperationCode::IMin;
+ Node value = SignedOperation(operation_a, is_oper1_signed, move(op_a), move(op_b));
+
+ switch (instr.vmnmx.operation) {
+ case VmnmxOperation::Mrg_16H:
+ value = BitfieldInsert(move(op_c), move(value), 16, 16);
+ break;
+ case VmnmxOperation::Mrg_16L:
+ value = BitfieldInsert(move(op_c), move(value), 0, 16);
+ break;
+ case VmnmxOperation::Mrg_8B0:
+ value = BitfieldInsert(move(op_c), move(value), 0, 8);
+ break;
+ case VmnmxOperation::Mrg_8B2:
+ value = BitfieldInsert(move(op_c), move(value), 16, 8);
+ break;
+ case VmnmxOperation::Acc:
+ value = Operation(OperationCode::IAdd, move(value), move(op_c));
+ break;
+ case VmnmxOperation::Min:
+ value = SignedOperation(OperationCode::IMin, is_oper2_signed, move(value), move(op_c));
+ break;
+ case VmnmxOperation::Max:
+ value = SignedOperation(OperationCode::IMax, is_oper2_signed, move(value), move(op_c));
+ break;
+ case VmnmxOperation::Nop:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ SetRegister(bb, instr.gpr0, move(value));
+}
+
} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp
index baf7188d2..8852c8a1b 100644
--- a/src/video_core/shader/shader_ir.cpp
+++ b/src/video_core/shader/shader_ir.cpp
@@ -359,6 +359,9 @@ Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) const {
switch (cc) {
case Tegra::Shader::ConditionCode::NEU:
return GetInternalFlag(InternalFlag::Zero, true);
+ case Tegra::Shader::ConditionCode::FCSM_TR:
+ UNIMPLEMENTED_MSG("EXIT.FCSM_TR is not implemented");
+ return MakeNode<PredicateNode>(Pred::NeverExecute, false);
default:
UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc));
return MakeNode<PredicateNode>(Pred::NeverExecute, false);
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 80fc9b82c..c6e7bdf50 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -312,6 +312,10 @@ private:
/// Conditionally saturates a half float pair
Node GetSaturatedHalfFloat(Node value, bool saturate = true);
+ /// Get image component value by type and size
+ std::pair<Node, bool> GetComponentValue(Tegra::Texture::ComponentType component_type,
+ u32 component_size, Node original_value);
+
/// Returns a predicate comparing two floats
Node GetPredicateComparisonFloat(Tegra::Shader::PredCondition condition, Node op_a, Node op_b);
/// Returns a predicate comparing two integers
@@ -350,6 +354,9 @@ private:
/// Marks the usage of a input or output attribute.
void MarkAttributeUsage(Tegra::Shader::Attribute::Index index, u64 element);
+ /// Decodes VMNMX instruction and inserts its code into the passed basic block.
+ void DecodeVMNMX(NodeBlock& bb, Tegra::Shader::Instruction instr);
+
void WriteTexInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr,
const Node4& components);
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index ae8817465..e0acd44d3 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -504,103 +504,6 @@ static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
return GetFormatBpp(pixel_format) / CHAR_BIT;
}
-enum class SurfaceCompression {
- None, // Not compressed
- Compressed, // Texture is compressed
- Converted, // Texture is converted before upload or after download
- Rearranged, // Texture is swizzled before upload or after download
-};
-
-constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table = {{
- SurfaceCompression::None, // ABGR8U
- SurfaceCompression::None, // ABGR8S
- SurfaceCompression::None, // ABGR8UI
- SurfaceCompression::None, // B5G6R5U
- SurfaceCompression::None, // A2B10G10R10U
- SurfaceCompression::None, // A1B5G5R5U
- SurfaceCompression::None, // R8U
- SurfaceCompression::None, // R8UI
- SurfaceCompression::None, // RGBA16F
- SurfaceCompression::None, // RGBA16U
- SurfaceCompression::None, // RGBA16S
- SurfaceCompression::None, // RGBA16UI
- SurfaceCompression::None, // R11FG11FB10F
- SurfaceCompression::None, // RGBA32UI
- SurfaceCompression::Compressed, // DXT1
- SurfaceCompression::Compressed, // DXT23
- SurfaceCompression::Compressed, // DXT45
- SurfaceCompression::Compressed, // DXN1
- SurfaceCompression::Compressed, // DXN2UNORM
- SurfaceCompression::Compressed, // DXN2SNORM
- SurfaceCompression::Compressed, // BC7U
- SurfaceCompression::Compressed, // BC6H_UF16
- SurfaceCompression::Compressed, // BC6H_SF16
- SurfaceCompression::Converted, // ASTC_2D_4X4
- SurfaceCompression::None, // BGRA8
- SurfaceCompression::None, // RGBA32F
- SurfaceCompression::None, // RG32F
- SurfaceCompression::None, // R32F
- SurfaceCompression::None, // R16F
- SurfaceCompression::None, // R16U
- SurfaceCompression::None, // R16S
- SurfaceCompression::None, // R16UI
- SurfaceCompression::None, // R16I
- SurfaceCompression::None, // RG16
- SurfaceCompression::None, // RG16F
- SurfaceCompression::None, // RG16UI
- SurfaceCompression::None, // RG16I
- SurfaceCompression::None, // RG16S
- SurfaceCompression::None, // RGB32F
- SurfaceCompression::None, // RGBA8_SRGB
- SurfaceCompression::None, // RG8U
- SurfaceCompression::None, // RG8S
- SurfaceCompression::None, // RG32UI
- SurfaceCompression::None, // RGBX16F
- SurfaceCompression::None, // R32UI
- SurfaceCompression::None, // R32I
- SurfaceCompression::Converted, // ASTC_2D_8X8
- SurfaceCompression::Converted, // ASTC_2D_8X5
- SurfaceCompression::Converted, // ASTC_2D_5X4
- SurfaceCompression::None, // BGRA8_SRGB
- SurfaceCompression::Compressed, // DXT1_SRGB
- SurfaceCompression::Compressed, // DXT23_SRGB
- SurfaceCompression::Compressed, // DXT45_SRGB
- SurfaceCompression::Compressed, // BC7U_SRGB
- SurfaceCompression::None, // R4G4B4A4U
- SurfaceCompression::Converted, // ASTC_2D_4X4_SRGB
- SurfaceCompression::Converted, // ASTC_2D_8X8_SRGB
- SurfaceCompression::Converted, // ASTC_2D_8X5_SRGB
- SurfaceCompression::Converted, // ASTC_2D_5X4_SRGB
- SurfaceCompression::Converted, // ASTC_2D_5X5
- SurfaceCompression::Converted, // ASTC_2D_5X5_SRGB
- SurfaceCompression::Converted, // ASTC_2D_10X8
- SurfaceCompression::Converted, // ASTC_2D_10X8_SRGB
- SurfaceCompression::Converted, // ASTC_2D_6X6
- SurfaceCompression::Converted, // ASTC_2D_6X6_SRGB
- SurfaceCompression::Converted, // ASTC_2D_10X10
- SurfaceCompression::Converted, // ASTC_2D_10X10_SRGB
- SurfaceCompression::Converted, // ASTC_2D_12X12
- SurfaceCompression::Converted, // ASTC_2D_12X12_SRGB
- SurfaceCompression::Converted, // ASTC_2D_8X6
- SurfaceCompression::Converted, // ASTC_2D_8X6_SRGB
- SurfaceCompression::Converted, // ASTC_2D_6X5
- SurfaceCompression::Converted, // ASTC_2D_6X5_SRGB
- SurfaceCompression::None, // E5B9G9R9F
- SurfaceCompression::None, // Z32F
- SurfaceCompression::None, // Z16
- SurfaceCompression::None, // Z24S8
- SurfaceCompression::Rearranged, // S8Z24
- SurfaceCompression::None, // Z32FS8
-}};
-
-constexpr SurfaceCompression GetFormatCompressionType(PixelFormat format) {
- if (format == PixelFormat::Invalid) {
- return SurfaceCompression::None;
- }
- DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_type_table.size());
- return compression_type_table[static_cast<std::size_t>(format)];
-}
-
SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_type);
bool SurfaceTargetIsLayered(SurfaceTarget target);
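
With the table removed, compression handling stops being a static property of the pixel format; the one remaining conversion case is decided per surface, as the surface_base changes below show. A reduced, self-contained sketch of that decision; the enum subset and helpers are illustrative stand-ins for the real VideoCore::Surface declarations:

#include <cstdio>

// Illustrative subset of the real pixel format enumeration.
enum class PixelFormat { ABGR8U, DXT1, ASTC_2D_4X4, ASTC_2D_8X8 };

static bool IsPixelFormatASTC(PixelFormat format) {
    return format == PixelFormat::ASTC_2D_4X4 || format == PixelFormat::ASTC_2D_8X8;
}

// A surface needs host-side conversion only when its format is ASTC and the
// host GPU cannot sample ASTC natively; block-compressed formats such as DXT
// upload as-is, and S8Z24 keeps its special rearrange path in LoadBuffer.
static bool ComputeIsConverted(PixelFormat format, bool is_astc_supported) {
    return IsPixelFormatASTC(format) && !is_astc_supported;
}

int main() {
    std::printf("%d\n", ComputeIsConverted(PixelFormat::ASTC_2D_4X4, false)); // 1
    std::printf("%d\n", ComputeIsConverted(PixelFormat::ASTC_2D_4X4, true));  // 0
    std::printf("%d\n", ComputeIsConverted(PixelFormat::DXT1, false));        // 0
    return 0;
}
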
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 002df414f..7af0e792c 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -18,15 +18,20 @@ MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192,
using Tegra::Texture::ConvertFromGuestToHost;
using VideoCore::MortonSwizzleMode;
-using VideoCore::Surface::SurfaceCompression;
+using VideoCore::Surface::IsPixelFormatASTC;
+using VideoCore::Surface::PixelFormat;
StagingCache::StagingCache() = default;
StagingCache::~StagingCache() = default;
-SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
- : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr},
- mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) {
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool is_astc_supported)
+ : params{params}, gpu_addr{gpu_addr}, mipmap_sizes(params.num_levels),
+ mipmap_offsets(params.num_levels) {
+ is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported;
+ host_memory_size = params.GetHostSizeInBytes(is_converted);
+
std::size_t offset = 0;
for (u32 level = 0; level < params.num_levels; ++level) {
const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
@@ -164,7 +169,7 @@ void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const Surf
std::size_t guest_offset{mipmap_offsets[level]};
if (params.is_layered) {
- std::size_t host_offset{0};
+ std::size_t host_offset = 0;
const std::size_t guest_stride = layer_size;
const std::size_t host_stride = params.GetHostLayerSize(level);
for (u32 layer = 0; layer < params.depth; ++layer) {
@@ -185,28 +190,17 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
MICROPROFILE_SCOPE(GPU_Load_Texture);
auto& staging_buffer = staging_cache.GetBuffer(0);
u8* host_ptr;
- is_continuous = memory_manager.IsBlockContinuous(gpu_addr, guest_memory_size);
-
- // Handle continuouty
- if (is_continuous) {
- // Use physical memory directly
- host_ptr = memory_manager.GetPointer(gpu_addr);
- if (!host_ptr) {
- return;
- }
- } else {
- // Use an extra temporal buffer
- auto& tmp_buffer = staging_cache.GetBuffer(1);
- tmp_buffer.resize(guest_memory_size);
- host_ptr = tmp_buffer.data();
- memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
- }
+ // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
+ memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
if (params.is_tiled) {
ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
params.block_width, static_cast<u32>(params.target));
for (u32 level = 0; level < params.num_levels; ++level) {
- const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
staging_buffer.data() + host_offset, level);
}
@@ -219,7 +213,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
const u32 height{(params.height + block_height - 1) / block_height};
const u32 copy_size{width * bpp};
if (params.pitch == copy_size) {
- std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
+ std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes(false));
} else {
const u8* start{host_ptr};
u8* write_to{staging_buffer.data()};
@@ -231,19 +225,15 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
}
}
- auto compression_type = params.GetCompressionType();
- if (compression_type == SurfaceCompression::None ||
- compression_type == SurfaceCompression::Compressed)
+ if (!is_converted && params.pixel_format != PixelFormat::S8Z24) {
return;
+ }
- for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
- const u32 level = level_up - 1;
- const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
- const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
- ? in_host_offset
- : params.GetConvertedMipmapOffset(level);
- u8* in_buffer = staging_buffer.data() + in_host_offset;
- u8* out_buffer = staging_buffer.data() + out_host_offset;
+ for (u32 level = params.num_levels; level--;) {
+ const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level, false)};
+ const std::size_t out_host_offset{params.GetHostMipmapLevelOffset(level, is_converted)};
+ u8* const in_buffer = staging_buffer.data() + in_host_offset;
+ u8* const out_buffer = staging_buffer.data() + out_host_offset;
ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
params.GetMipWidth(level), params.GetMipHeight(level),
params.GetMipDepth(level), true, true);
@@ -256,24 +246,15 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
auto& staging_buffer = staging_cache.GetBuffer(0);
u8* host_ptr;
- // Handle continuouty
- if (is_continuous) {
- // Use physical memory directly
- host_ptr = memory_manager.GetPointer(gpu_addr);
- if (!host_ptr) {
- return;
- }
- } else {
- // Use an extra temporal buffer
- auto& tmp_buffer = staging_cache.GetBuffer(1);
- tmp_buffer.resize(guest_memory_size);
- host_ptr = tmp_buffer.data();
- }
+ // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
if (params.is_tiled) {
ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
for (u32 level = 0; level < params.num_levels; ++level) {
- const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
staging_buffer.data() + host_offset, level);
}
@@ -299,9 +280,7 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
}
}
}
- if (!is_continuous) {
- memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
- }
+ memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
}
} // namespace VideoCommon
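
One subtlety in the rewritten LoadBuffer conversion loop: it walks mip levels from the last level down to 0 because the converted (RGBA8) offsets in the staging buffer are never smaller than the guest offsets, so back-to-front writes cannot clobber input that still has to be read. A toy model of that in-place expansion:

#include <cstdio>
#include <vector>

// Packed inputs expand in place into a layout with larger offsets; processing
// descending keeps every read ahead of the writes. Ascending order would read
// already-overwritten data.
int main() {
    std::vector<int> buffer{1, 2, 3, 0, 0, 0};
    const int in_offset[3] = {0, 1, 2};  // packed "guest" layout
    const int out_offset[3] = {0, 2, 4}; // expanded "host" layout (2x per entry)
    for (int level = 3; level--;) {      // descending, like the diff
        const int texel = buffer[in_offset[level]];
        buffer[out_offset[level]] = texel;
        buffer[out_offset[level] + 1] = texel; // stands in for the expanded texel
    }
    for (const int v : buffer) {
        std::printf("%d ", v); // 1 1 2 2 3 3 (ascending order would print 1 1 1 1 1 1)
    }
    std::printf("\n");
    return 0;
}
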
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index 5f79bb0aa..a39a8661b 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -68,8 +68,8 @@ public:
return gpu_addr;
}
- bool Overlaps(const CacheAddr start, const CacheAddr end) const {
- return (cache_addr < end) && (cache_addr_end > start);
+ bool Overlaps(const VAddr start, const VAddr end) const {
+ return (cpu_addr < end) && (cpu_addr_end > start);
}
bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
@@ -86,21 +86,13 @@ public:
return cpu_addr;
}
- void SetCpuAddr(const VAddr new_addr) {
- cpu_addr = new_addr;
- }
-
- CacheAddr GetCacheAddr() const {
- return cache_addr;
+ VAddr GetCpuAddrEnd() const {
+ return cpu_addr_end;
}
- CacheAddr GetCacheAddrEnd() const {
- return cache_addr_end;
- }
-
- void SetCacheAddr(const CacheAddr new_addr) {
- cache_addr = new_addr;
- cache_addr_end = new_addr + guest_memory_size;
+ void SetCpuAddr(const VAddr new_addr) {
+ cpu_addr = new_addr;
+ cpu_addr_end = new_addr + guest_memory_size;
}
const SurfaceParams& GetSurfaceParams() const {
@@ -119,18 +111,14 @@ public:
return mipmap_sizes[level];
}
- void MarkAsContinuous(const bool is_continuous) {
- this->is_continuous = is_continuous;
- }
-
- bool IsContinuous() const {
- return is_continuous;
- }
-
bool IsLinear() const {
return !params.is_tiled;
}
+ bool IsConverted() const {
+ return is_converted;
+ }
+
bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
return params.pixel_format == pixel_format;
}
@@ -160,7 +148,8 @@ public:
}
protected:
- explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
+ explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool is_astc_supported);
~SurfaceBaseImpl() = default;
virtual void DecorateSurfaceName() = 0;
@@ -168,12 +157,11 @@ protected:
const SurfaceParams params;
std::size_t layer_size;
std::size_t guest_memory_size;
- const std::size_t host_memory_size;
+ std::size_t host_memory_size;
GPUVAddr gpu_addr{};
- CacheAddr cache_addr{};
- CacheAddr cache_addr_end{};
VAddr cpu_addr{};
- bool is_continuous{};
+ VAddr cpu_addr_end{};
+ bool is_converted{};
std::vector<std::size_t> mipmap_sizes;
std::vector<std::size_t> mipmap_offsets;
@@ -288,8 +276,9 @@ public:
}
protected:
- explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
- : SurfaceBaseImpl(gpu_addr, params) {}
+ explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool is_astc_supported)
+ : SurfaceBaseImpl(gpu_addr, params, is_astc_supported) {}
~SurfaceBase() = default;
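
The rewritten Overlaps is the standard half-open interval intersection test, now over CPU addresses. A minimal standalone check:

#include <cstdint>
#include <cstdio>

// Two ranges [a_begin, a_end) and [b_begin, b_end) overlap iff each begins
// before the other one ends, exactly as Surface::Overlaps above.
static bool Overlaps(std::uint64_t a_begin, std::uint64_t a_end,
                     std::uint64_t b_begin, std::uint64_t b_end) {
    return a_begin < b_end && a_end > b_begin;
}

int main() {
    std::printf("%d\n", Overlaps(0x1000, 0x2000, 0x1FFF, 0x3000)); // 1: one byte shared
    std::printf("%d\n", Overlaps(0x1000, 0x2000, 0x2000, 0x3000)); // 0: merely adjacent
    return 0;
}
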
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 9931c5ef7..6f3ef45be 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -113,10 +113,8 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
params.height = tic.Height();
params.depth = tic.Depth();
params.pitch = params.is_tiled ? 0 : tic.Pitch();
- if (params.target == SurfaceTarget::Texture2D && params.depth > 1) {
- params.depth = 1;
- } else if (params.target == SurfaceTarget::TextureCubemap ||
- params.target == SurfaceTarget::TextureCubeArray) {
+ if (params.target == SurfaceTarget::TextureCubemap ||
+ params.target == SurfaceTarget::TextureCubeArray) {
params.depth *= 6;
}
params.num_levels = tic.max_mip_level + 1;
@@ -309,28 +307,26 @@ std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
return offset;
}
-std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
- }
- return offset;
-}
-
-std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level, bool is_converted) const {
std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetConvertedMipmapSize(i);
+ if (is_converted) {
+ for (u32 i = 0; i < level; ++i) {
+ offset += GetConvertedMipmapSize(i) * GetNumLayers();
+ }
+ } else {
+ for (u32 i = 0; i < level; ++i) {
+ offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
+ }
}
return offset;
}
std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
constexpr std::size_t rgba8_bpp = 4ULL;
- const std::size_t width_t = GetMipWidth(level);
- const std::size_t height_t = GetMipHeight(level);
- const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
- return width_t * height_t * depth_t * rgba8_bpp;
+ const std::size_t mip_width = GetMipWidth(level);
+ const std::size_t mip_height = GetMipHeight(level);
+ const std::size_t mip_depth = is_layered ? 1 : GetMipDepth(level);
+ return mip_width * mip_height * mip_depth * rgba8_bpp;
}
std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
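
GetHostMipmapLevelOffset now folds the converted case in directly: converted levels accumulate RGBA8-sized mips, once per layer, and layered targets fix the mip depth to 1 as the GetConvertedMipmapSize change shows. A self-contained sketch under assumed names; MipParams is an illustrative stand-in for the real SurfaceParams:

#include <algorithm>
#include <cstddef>
#include <cstdio>

struct MipParams {
    std::size_t width;
    std::size_t height;
    std::size_t num_layers;
};

static std::size_t MipDim(std::size_t base, unsigned level) {
    return std::max<std::size_t>(base >> level, 1);
}

// Converted levels are stored as plain RGBA8, i.e. 4 bytes per texel.
static std::size_t ConvertedMipmapSize(const MipParams& p, unsigned level) {
    return MipDim(p.width, level) * MipDim(p.height, level) * 4;
}

// The offset of `level` accumulates every previous level once per layer,
// mirroring the is_converted branch of GetHostMipmapLevelOffset.
static std::size_t ConvertedMipmapOffset(const MipParams& p, unsigned level) {
    std::size_t offset = 0;
    for (unsigned i = 0; i < level; ++i) {
        offset += ConvertedMipmapSize(p, i) * p.num_layers;
    }
    return offset;
}

int main() {
    const MipParams cube{256, 256, 6}; // e.g. a 256x256 cubemap
    std::printf("%zu\n", ConvertedMipmapOffset(cube, 1)); // 256*256*4*6 = 1572864
    return 0;
}
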
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 995cc3818..24957df8d 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -20,8 +20,6 @@ namespace VideoCommon {
class FormatLookupTable;
-using VideoCore::Surface::SurfaceCompression;
-
class SurfaceParams {
public:
/// Creates SurfaceCachedParams from a texture configuration.
@@ -67,16 +65,14 @@ public:
return GetInnerMemorySize(false, false, false);
}
- std::size_t GetHostSizeInBytes() const {
- std::size_t host_size_in_bytes;
- if (GetCompressionType() == SurfaceCompression::Converted) {
- // ASTC is uncompressed in software, in emulated as RGBA8
- host_size_in_bytes = 0;
- for (u32 level = 0; level < num_levels; ++level) {
- host_size_in_bytes += GetConvertedMipmapSize(level);
- }
- } else {
- host_size_in_bytes = GetInnerMemorySize(true, false, false);
+ std::size_t GetHostSizeInBytes(bool is_converted) const {
+ if (!is_converted) {
+ return GetInnerMemorySize(true, false, false);
+ }
+ // ASTC is decompressed in software and emulated as RGBA8
+ std::size_t host_size_in_bytes = 0;
+ for (u32 level = 0; level < num_levels; ++level) {
+ host_size_in_bytes += GetConvertedMipmapSize(level) * GetNumLayers();
}
return host_size_in_bytes;
}
@@ -107,9 +103,8 @@ public:
u32 GetMipBlockDepth(u32 level) const;
/// Returns the best possible row/pitch alignment for the surface.
- u32 GetRowAlignment(u32 level) const {
- const u32 bpp =
- GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
+ u32 GetRowAlignment(u32 level, bool is_converted) const {
+ const u32 bpp = is_converted ? 4 : GetBytesPerPixel();
return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
}
@@ -117,11 +112,7 @@ public:
std::size_t GetGuestMipmapLevelOffset(u32 level) const;
/// Returns the offset in bytes in host memory (linear) of a given mipmap level.
- std::size_t GetHostMipmapLevelOffset(u32 level) const;
-
- /// Returns the offset in bytes in host memory (linear) of a given mipmap level
- /// for a texture that is converted in host gpu.
- std::size_t GetConvertedMipmapOffset(u32 level) const;
+ std::size_t GetHostMipmapLevelOffset(u32 level, bool is_converted) const;
/// Returns the size in bytes in guest memory of a given mipmap level.
std::size_t GetGuestMipmapSize(u32 level) const {
@@ -196,11 +187,6 @@ public:
pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
}
- /// Returns how the compression should be handled for this texture.
- SurfaceCompression GetCompressionType() const {
- return VideoCore::Surface::GetFormatCompressionType(pixel_format);
- }
-
/// Returns is the surface is a TextureBuffer type of surface.
bool IsBuffer() const {
return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 6cdbe63d0..4edd4313b 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -52,11 +52,9 @@ using RenderTargetConfig = Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig;
template <typename TSurface, typename TView>
class TextureCache {
- using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface>>;
- using IntervalType = typename IntervalMap::interval_type;
public:
- void InvalidateRegion(CacheAddr addr, std::size_t size) {
+ void InvalidateRegion(VAddr addr, std::size_t size) {
std::lock_guard lock{mutex};
for (const auto& surface : GetSurfacesInRegion(addr, size)) {
@@ -76,7 +74,7 @@ public:
guard_samplers = new_guard;
}
- void FlushRegion(CacheAddr addr, std::size_t size) {
+ void FlushRegion(VAddr addr, std::size_t size) {
std::lock_guard lock{mutex};
auto surfaces = GetSurfacesInRegion(addr, size);
@@ -99,9 +97,9 @@ public:
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
- const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- if (!cache_addr) {
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cpu_addr) {
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
@@ -110,7 +108,7 @@ public:
}
const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, cache_addr, params, true, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -124,13 +122,13 @@ public:
if (!gpu_addr) {
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
- const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- if (!cache_addr) {
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cpu_addr) {
return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
}
const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
- const auto [surface, view] = GetSurface(gpu_addr, cache_addr, params, true, false);
+ const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
if (guard_samplers) {
sampled_textures.push_back(surface);
}
@@ -145,7 +143,7 @@ public:
return any_rt;
}
- TView GetDepthBufferSurface(bool preserve_contents) {
+ TView GetDepthBufferSurface() {
std::lock_guard lock{mutex};
auto& maxwell3d = system.GPU().Maxwell3D();
if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -159,14 +157,14 @@ public:
SetEmptyDepthBuffer();
return {};
}
- const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- if (!cache_addr) {
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cpu_addr) {
SetEmptyDepthBuffer();
return {};
}
const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
- auto surface_view = GetSurface(gpu_addr, cache_addr, depth_params, preserve_contents, true);
+ auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true);
if (depth_buffer.target)
depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
depth_buffer.target = surface_view.first;
@@ -176,7 +174,7 @@ public:
return surface_view.second;
}
- TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
+ TView GetColorBufferSurface(std::size_t index) {
std::lock_guard lock{mutex};
ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
auto& maxwell3d = system.GPU().Maxwell3D();
@@ -199,16 +197,15 @@ public:
return {};
}
- const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- if (!cache_addr) {
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cpu_addr) {
SetEmptyColorBuffer(index);
return {};
}
- auto surface_view =
- GetSurface(gpu_addr, cache_addr, SurfaceParams::CreateForFramebuffer(system, index),
- preserve_contents, true);
+ auto surface_view = GetSurface(gpu_addr, *cpu_addr,
+ SurfaceParams::CreateForFramebuffer(system, index), true);
if (render_targets[index].target)
render_targets[index].target->MarkAsRenderTarget(false, NO_RT);
render_targets[index].target = surface_view.first;
@@ -257,27 +254,26 @@ public:
const GPUVAddr src_gpu_addr = src_config.Address();
const GPUVAddr dst_gpu_addr = dst_config.Address();
DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr);
- const auto dst_host_ptr{system.GPU().MemoryManager().GetPointer(dst_gpu_addr)};
- const auto dst_cache_addr{ToCacheAddr(dst_host_ptr)};
- const auto src_host_ptr{system.GPU().MemoryManager().GetPointer(src_gpu_addr)};
- const auto src_cache_addr{ToCacheAddr(src_host_ptr)};
+ const std::optional<VAddr> dst_cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(dst_gpu_addr);
+ const std::optional<VAddr> src_cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
std::pair<TSurface, TView> dst_surface =
- GetSurface(dst_gpu_addr, dst_cache_addr, dst_params, true, false);
+ GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false);
std::pair<TSurface, TView> src_surface =
- GetSurface(src_gpu_addr, src_cache_addr, src_params, true, false);
+ GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false);
ImageBlit(src_surface.second, dst_surface.second, copy_config);
dst_surface.first->MarkAsModified(true, Tick());
}
- TSurface TryFindFramebufferSurface(const u8* host_ptr) {
- const CacheAddr cache_addr = ToCacheAddr(host_ptr);
- if (!cache_addr) {
+ TSurface TryFindFramebufferSurface(VAddr addr) {
+ if (!addr) {
return nullptr;
}
- const CacheAddr page = cache_addr >> registry_page_bits;
+ const VAddr page = addr >> registry_page_bits;
std::vector<TSurface>& list = registry[page];
for (auto& surface : list) {
- if (surface->GetCacheAddr() == cache_addr) {
+ if (surface->GetCpuAddr() == addr) {
return surface;
}
}
@@ -289,8 +285,9 @@ public:
}
protected:
- TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
- : system{system}, rasterizer{rasterizer} {
+ explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ bool is_astc_supported)
+ : system{system}, is_astc_supported{is_astc_supported}, rasterizer{rasterizer} {
for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
SetEmptyColorBuffer(i);
}
@@ -337,18 +334,14 @@ protected:
void Register(TSurface surface) {
const GPUVAddr gpu_addr = surface->GetGpuAddr();
- const CacheAddr cache_ptr = ToCacheAddr(system.GPU().MemoryManager().GetPointer(gpu_addr));
const std::size_t size = surface->GetSizeInBytes();
const std::optional<VAddr> cpu_addr =
system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
- if (!cache_ptr || !cpu_addr) {
+ if (!cpu_addr) {
LOG_CRITICAL(HW_GPU, "Failed to register surface with unmapped gpu_address 0x{:016x}",
gpu_addr);
return;
}
- const bool continuous = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
- surface->MarkAsContinuous(continuous);
- surface->SetCacheAddr(cache_ptr);
surface->SetCpuAddr(*cpu_addr);
RegisterInnerCache(surface);
surface->MarkAsRegistered(true);
@@ -381,6 +374,7 @@ protected:
}
Core::System& system;
+ const bool is_astc_supported;
private:
enum class RecycleStrategy : u32 {
@@ -456,22 +450,18 @@ private:
* @param overlaps The overlapping surfaces registered in the cache.
* @param params The parameters for the new surface.
* @param gpu_addr The starting address of the new surface.
- * @param preserve_contents Indicates that the new surface should be loaded from memory or left
- * blank.
* @param untopological Indicates to the recycler that the texture has no way to match the
* overlaps due to topological reasons.
**/
std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
const SurfaceParams& params, const GPUVAddr gpu_addr,
- const bool preserve_contents,
const MatchTopologyResult untopological) {
- const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation;
for (auto& surface : overlaps) {
Unregister(surface);
}
switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
case RecycleStrategy::Ignore: {
- return InitializeSurface(gpu_addr, params, do_load);
+ return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation);
}
case RecycleStrategy::Flush: {
std::sort(overlaps.begin(), overlaps.end(),
@@ -481,7 +471,7 @@ private:
for (auto& surface : overlaps) {
FlushSurface(surface);
}
- return InitializeSurface(gpu_addr, params, preserve_contents);
+ return InitializeSurface(gpu_addr, params);
}
case RecycleStrategy::BufferCopy: {
auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -490,7 +480,7 @@ private:
}
default: {
UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
- return InitializeSurface(gpu_addr, params, do_load);
+ return InitializeSurface(gpu_addr, params);
}
}
}
@@ -519,7 +509,9 @@ private:
}
const auto& final_params = new_surface->GetSurfaceParams();
if (cr_params.type != final_params.type) {
- BufferCopy(current_surface, new_surface);
+ if (Settings::values.use_accurate_gpu_emulation) {
+ BufferCopy(current_surface, new_surface);
+ }
} else {
std::vector<CopyParams> bricks = current_surface->BreakDown(final_params);
for (auto& brick : bricks) {
@@ -626,14 +618,11 @@ private:
* @param params The parameters on the new surface.
* @param gpu_addr The starting address of the new surface.
* @param cache_addr The starting address of the new surface on physical memory.
- * @param preserve_contents Indicates that the new surface should be loaded from memory or
- * left blank.
*/
std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
const SurfaceParams& params,
const GPUVAddr gpu_addr,
- const CacheAddr cache_addr,
- bool preserve_contents) {
+ const VAddr cpu_addr) {
if (params.target == SurfaceTarget::Texture3D) {
bool failed = false;
if (params.num_levels > 1) {
@@ -657,7 +646,7 @@ private:
failed = true;
break;
}
- const u32 offset = static_cast<u32>(surface->GetCacheAddr() - cache_addr);
+ const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
const auto [x, y, z] = params.GetBlockOffsetXYZ(offset);
modified |= surface->IsModified();
const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
@@ -677,23 +666,23 @@ private:
} else {
for (const auto& surface : overlaps) {
if (!surface->MatchTarget(params.target)) {
- if (overlaps.size() == 1 && surface->GetCacheAddr() == cache_addr) {
+ if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
if (Settings::values.use_accurate_gpu_emulation) {
return std::nullopt;
}
Unregister(surface);
- return InitializeSurface(gpu_addr, params, preserve_contents);
+ return InitializeSurface(gpu_addr, params);
}
return std::nullopt;
}
- if (surface->GetCacheAddr() != cache_addr) {
+ if (surface->GetCpuAddr() != cpu_addr) {
continue;
}
if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) {
return {{surface, surface->GetMainView()}};
}
}
- return InitializeSurface(gpu_addr, params, preserve_contents);
+ return InitializeSurface(gpu_addr, params);
}
}
@@ -716,23 +705,19 @@ private:
*
* @param gpu_addr The starting address of the candidate surface.
* @param params The parameters on the candidate surface.
- * @param preserve_contents Indicates that the new surface should be loaded from memory or
- * left blank.
* @param is_render Whether or not the surface is a render target.
**/
- std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const CacheAddr cache_addr,
- const SurfaceParams& params, bool preserve_contents,
- bool is_render) {
+ std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
+ const SurfaceParams& params, bool is_render) {
// Step 1
// Check Level 1 Cache for a fast structural match. If candidate surface
// matches at certain level we are pretty much done.
- if (const auto iter = l1_cache.find(cache_addr); iter != l1_cache.end()) {
+ if (const auto iter = l1_cache.find(cpu_addr); iter != l1_cache.end()) {
TSurface& current_surface = iter->second;
const auto topological_result = current_surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
std::vector<TSurface> overlaps{current_surface};
- return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
- topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, topological_result);
}
const auto struct_result = current_surface->MatchesStructure(params);
@@ -753,11 +738,11 @@ private:
// Step 2
// Obtain all possible overlaps in the memory region
const std::size_t candidate_size = params.GetGuestSizeInBytes();
- auto overlaps{GetSurfacesInRegion(cache_addr, candidate_size)};
+ auto overlaps{GetSurfacesInRegion(cpu_addr, candidate_size)};
// If none are found, we are done. we just load the surface and create it.
if (overlaps.empty()) {
- return InitializeSurface(gpu_addr, params, preserve_contents);
+ return InitializeSurface(gpu_addr, params);
}
// Step 3
@@ -767,15 +752,13 @@ private:
for (const auto& surface : overlaps) {
const auto topological_result = surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
- return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
- topological_result);
+ return RecycleSurface(overlaps, params, gpu_addr, topological_result);
}
}
// Check if it's a 3D texture
if (params.block_depth > 0) {
- auto surface =
- Manage3DSurfaces(overlaps, params, gpu_addr, cache_addr, preserve_contents);
+ auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr);
if (surface) {
return *surface;
}
@@ -795,8 +778,7 @@ private:
return *view;
}
}
- return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
- MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
}
// Now we check if the candidate is a mipmap/layer of the overlap
std::optional<TView> view =
@@ -820,7 +802,7 @@ private:
pair.first->EmplaceView(params, gpu_addr, candidate_size);
if (mirage_view)
return {pair.first, *mirage_view};
- return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ return RecycleSurface(overlaps, params, gpu_addr,
MatchTopologyResult::FullMatch);
}
return {current_surface, *view};
@@ -836,8 +818,7 @@ private:
}
}
// We failed all the tests, recycle the overlaps into a new texture.
- return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
- MatchTopologyResult::FullMatch);
+ return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
}
/**
@@ -850,16 +831,16 @@ private:
* @param params The parameters on the candidate surface.
**/
Deduction DeduceSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) {
- const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
- if (!cache_addr) {
+ if (!cpu_addr) {
Deduction result{};
result.type = DeductionType::DeductionFailed;
return result;
}
- if (const auto iter = l1_cache.find(cache_addr); iter != l1_cache.end()) {
+ if (const auto iter = l1_cache.find(*cpu_addr); iter != l1_cache.end()) {
TSurface& current_surface = iter->second;
const auto topological_result = current_surface->MatchesTopology(params);
if (topological_result != MatchTopologyResult::FullMatch) {
@@ -878,7 +859,7 @@ private:
}
const std::size_t candidate_size = params.GetGuestSizeInBytes();
- auto overlaps{GetSurfacesInRegion(cache_addr, candidate_size)};
+ auto overlaps{GetSurfacesInRegion(*cpu_addr, candidate_size)};
if (overlaps.empty()) {
Deduction result{};
@@ -995,10 +976,10 @@ private:
}
std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
- bool preserve_contents) {
+ bool do_load = true) {
auto new_surface{GetUncachedSurface(gpu_addr, params)};
Register(new_surface);
- if (preserve_contents) {
+ if (do_load) {
LoadSurface(new_surface);
}
return {new_surface, new_surface->GetMainView()};
@@ -1022,10 +1003,10 @@ private:
}
void RegisterInnerCache(TSurface& surface) {
- const CacheAddr cache_addr = surface->GetCacheAddr();
- CacheAddr start = cache_addr >> registry_page_bits;
- const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
- l1_cache[cache_addr] = surface;
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ VAddr start = cpu_addr >> registry_page_bits;
+ const VAddr end = (surface->GetCpuAddrEnd() - 1) >> registry_page_bits;
+ l1_cache[cpu_addr] = surface;
while (start <= end) {
registry[start].push_back(surface);
start++;
@@ -1033,10 +1014,10 @@ private:
}
void UnregisterInnerCache(TSurface& surface) {
- const CacheAddr cache_addr = surface->GetCacheAddr();
- CacheAddr start = cache_addr >> registry_page_bits;
- const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
- l1_cache.erase(cache_addr);
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ VAddr start = cpu_addr >> registry_page_bits;
+ const VAddr end = (surface->GetCpuAddrEnd() - 1) >> registry_page_bits;
+ l1_cache.erase(cpu_addr);
while (start <= end) {
auto& reg{registry[start]};
reg.erase(std::find(reg.begin(), reg.end(), surface));
@@ -1044,18 +1025,18 @@ private:
}
}
- std::vector<TSurface> GetSurfacesInRegion(const CacheAddr cache_addr, const std::size_t size) {
+ std::vector<TSurface> GetSurfacesInRegion(const VAddr cpu_addr, const std::size_t size) {
if (size == 0) {
return {};
}
- const CacheAddr cache_addr_end = cache_addr + size;
- CacheAddr start = cache_addr >> registry_page_bits;
- const CacheAddr end = (cache_addr_end - 1) >> registry_page_bits;
+ const VAddr cpu_addr_end = cpu_addr + size;
+ VAddr start = cpu_addr >> registry_page_bits;
+ const VAddr end = (cpu_addr_end - 1) >> registry_page_bits;
std::vector<TSurface> surfaces;
while (start <= end) {
std::vector<TSurface>& list = registry[start];
for (auto& surface : list) {
- if (!surface->IsPicked() && surface->Overlaps(cache_addr, cache_addr_end)) {
+ if (!surface->IsPicked() && surface->Overlaps(cpu_addr, cpu_addr_end)) {
surface->MarkAsPicked(true);
surfaces.push_back(surface);
}
@@ -1144,14 +1125,14 @@ private:
// large in size.
static constexpr u64 registry_page_bits{20};
static constexpr u64 registry_page_size{1 << registry_page_bits};
- std::unordered_map<CacheAddr, std::vector<TSurface>> registry;
+ std::unordered_map<VAddr, std::vector<TSurface>> registry;
static constexpr u32 DEPTH_RT = 8;
static constexpr u32 NO_RT = 0xFFFFFFFF;
// The L1 Cache is used for fast texture lookup before checking the overlaps
// This avoids calculating size and other stuffs.
- std::unordered_map<CacheAddr, TSurface> l1_cache;
+ std::unordered_map<VAddr, TSurface> l1_cache;
/// The surface reserve is a "backup" cache, this is where we put unique surfaces that have
/// previously been used. This is to prevent surfaces from being constantly created and
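
The registry backing GetSurfacesInRegion buckets surfaces into 2^20-byte pages keyed by VAddr; range queries then visit every page the range touches. A reduced model of that scheme; PageRegistry and the int surface handles are illustrative:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

using VAddr = std::uint64_t;

constexpr unsigned PAGE_BITS = 20; // 1 MiB buckets, matching registry_page_bits

struct PageRegistry {
    std::unordered_map<VAddr, std::vector<int>> pages;

    void Register(int surface, VAddr addr, std::size_t size) {
        const VAddr last = (addr + size - 1) >> PAGE_BITS;
        for (VAddr page = addr >> PAGE_BITS; page <= last; ++page) {
            pages[page].push_back(surface);
        }
    }

    // Surfaces sharing any page with [addr, addr + size) are candidates; the
    // real cache then filters with Overlaps and dedups via the IsPicked flag.
    std::vector<int> Candidates(VAddr addr, std::size_t size) const {
        std::vector<int> result;
        const VAddr last = (addr + size - 1) >> PAGE_BITS;
        for (VAddr page = addr >> PAGE_BITS; page <= last; ++page) {
            if (const auto it = pages.find(page); it != pages.end()) {
                result.insert(result.end(), it->second.begin(), it->second.end());
            }
        }
        return result;
    }
};

int main() {
    PageRegistry registry;
    registry.Register(1, 0x100000, 0x8000);
    std::printf("%zu\n", registry.Candidates(0x104000, 0x1000).size()); // 1
    return 0;
}
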
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index 062b4f252..365bde2f1 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -20,6 +20,8 @@
#include <cstring>
#include <vector>
+#include <boost/container/static_vector.hpp>
+
#include "common/common_types.h"
#include "video_core/textures/astc.h"
@@ -39,25 +41,25 @@ constexpr u32 Popcnt(u32 n) {
class InputBitStream {
public:
- explicit InputBitStream(const u8* ptr, std::size_t start_offset = 0)
- : m_CurByte(ptr), m_NextBit(start_offset % 8) {}
+ constexpr explicit InputBitStream(const u8* ptr, std::size_t start_offset = 0)
+ : cur_byte{ptr}, next_bit{start_offset % 8} {}
- std::size_t GetBitsRead() const {
- return m_BitsRead;
+ constexpr std::size_t GetBitsRead() const {
+ return bits_read;
}
- u32 ReadBit() {
- u32 bit = *m_CurByte >> m_NextBit++;
- while (m_NextBit >= 8) {
- m_NextBit -= 8;
- m_CurByte++;
+ constexpr bool ReadBit() {
+ const bool bit = (*cur_byte >> next_bit++) & 1;
+ while (next_bit >= 8) {
+ next_bit -= 8;
+ cur_byte++;
}
- m_BitsRead++;
- return bit & 1;
+ bits_read++;
+ return bit;
}
- u32 ReadBits(std::size_t nBits) {
+ constexpr u32 ReadBits(std::size_t nBits) {
u32 ret = 0;
for (std::size_t i = 0; i < nBits; ++i) {
ret |= (ReadBit() & 1) << i;
@@ -66,7 +68,7 @@ public:
}
template <std::size_t nBits>
- u32 ReadBits() {
+ constexpr u32 ReadBits() {
u32 ret = 0;
for (std::size_t i = 0; i < nBits; ++i) {
ret |= (ReadBit() & 1) << i;
@@ -75,64 +77,58 @@ public:
}
private:
- const u8* m_CurByte;
- std::size_t m_NextBit = 0;
- std::size_t m_BitsRead = 0;
+ const u8* cur_byte;
+ std::size_t next_bit = 0;
+ std::size_t bits_read = 0;
};
class OutputBitStream {
public:
- explicit OutputBitStream(u8* ptr, s32 nBits = 0, s32 start_offset = 0)
- : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
-
- ~OutputBitStream() = default;
+ constexpr explicit OutputBitStream(u8* ptr, std::size_t bits = 0, std::size_t start_offset = 0)
+ : cur_byte{ptr}, num_bits{bits}, next_bit{start_offset % 8} {}
- s32 GetBitsWritten() const {
- return m_BitsWritten;
+ constexpr std::size_t GetBitsWritten() const {
+ return bits_written;
}
- void WriteBitsR(u32 val, u32 nBits) {
+ constexpr void WriteBitsR(u32 val, u32 nBits) {
for (u32 i = 0; i < nBits; i++) {
WriteBit((val >> (nBits - i - 1)) & 1);
}
}
- void WriteBits(u32 val, u32 nBits) {
+ constexpr void WriteBits(u32 val, u32 nBits) {
for (u32 i = 0; i < nBits; i++) {
WriteBit((val >> i) & 1);
}
}
private:
- void WriteBit(s32 b) {
-
- if (done)
+ constexpr void WriteBit(bool b) {
+ if (bits_written >= num_bits) {
return;
+ }
- const u32 mask = 1 << m_NextBit++;
+ const u32 mask = 1 << next_bit++;
// clear the bit
- *m_CurByte &= static_cast<u8>(~mask);
+ *cur_byte &= static_cast<u8>(~mask);
// Write the bit, if necessary
if (b)
- *m_CurByte |= static_cast<u8>(mask);
+ *cur_byte |= static_cast<u8>(mask);
// Next byte?
- if (m_NextBit >= 8) {
- m_CurByte += 1;
- m_NextBit = 0;
+ if (next_bit >= 8) {
+ cur_byte += 1;
+ next_bit = 0;
}
-
- done = done || ++m_BitsWritten >= m_NumBits;
}
- s32 m_BitsWritten = 0;
- const s32 m_NumBits;
- u8* m_CurByte;
- s32 m_NextBit = 0;
-
- bool done = false;
+ u8* cur_byte;
+ std::size_t num_bits;
+ std::size_t bits_written = 0;
+ std::size_t next_bit = 0;
};
template <typename IntType>
@@ -195,9 +191,13 @@ struct IntegerEncodedValue {
u32 trit_value;
};
};
+using IntegerEncodedVector = boost::container::static_vector<
+ IntegerEncodedValue, 64,
+ boost::container::static_vector_options<
+ boost::container::inplace_alignment<alignof(IntegerEncodedValue)>,
+ boost::container::throw_on_overflow<false>>::type>;
-static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
- u32 nBitsPerValue) {
+static void DecodeTritBlock(InputBitStream& bits, IntegerEncodedVector& result, u32 nBitsPerValue) {
// Implement the algorithm in section C.2.12
u32 m[5];
u32 t[5];
@@ -255,7 +255,7 @@ static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValu
}
}
-static void DecodeQus32Block(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
+static void DecodeQus32Block(InputBitStream& bits, IntegerEncodedVector& result,
u32 nBitsPerValue) {
// Implement the algorithm in section C.2.12
u32 m[3];
@@ -343,8 +343,8 @@ static constexpr std::array EncodingsValues = MakeEncodedValues();
// Fills result with the values that are encoded in the given
// bitstream. We must know beforehand what the maximum possible
// value is, and how many values we're decoding.
-static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, InputBitStream& bits,
- u32 maxRange, u32 nValues) {
+static void DecodeIntegerSequence(IntegerEncodedVector& result, InputBitStream& bits, u32 maxRange,
+ u32 nValues) {
// Determine encoding parameters
IntegerEncodedValue val = EncodingsValues[maxRange];
@@ -634,12 +634,14 @@ static void FillError(u32* outBuf, u32 blockWidth, u32 blockHeight) {
// Replicates low numBits such that [(toBit - 1):(toBit - 1 - fromBit)]
// is the same as [(numBits - 1):0] and repeats all the way down.
template <typename IntType>
-static IntType Replicate(IntType val, u32 numBits, u32 toBit) {
- if (numBits == 0)
+static constexpr IntType Replicate(IntType val, u32 numBits, u32 toBit) {
+ if (numBits == 0) {
return 0;
- if (toBit == 0)
+ }
+ if (toBit == 0) {
return 0;
- IntType v = val & static_cast<IntType>((1 << numBits) - 1);
+ }
+ const IntType v = val & static_cast<IntType>((1 << numBits) - 1);
IntType res = v;
u32 reslen = numBits;
while (reslen < toBit) {
@@ -656,6 +658,89 @@ static IntType Replicate(IntType val, u32 numBits, u32 toBit) {
return res;
}
+static constexpr std::size_t NumReplicateEntries(u32 num_bits) {
+ return std::size_t(1) << num_bits;
+}
+
+template <typename IntType, u32 num_bits, u32 to_bit>
+static constexpr auto MakeReplicateTable() {
+ std::array<IntType, NumReplicateEntries(num_bits)> table{};
+ for (IntType value = 0; value < static_cast<IntType>(std::size(table)); ++value) {
+ table[value] = Replicate(value, num_bits, to_bit);
+ }
+ return table;
+}
+
+static constexpr auto REPLICATE_BYTE_TO_16_TABLE = MakeReplicateTable<u32, 8, 16>();
+static constexpr u32 ReplicateByteTo16(std::size_t value) {
+ return REPLICATE_BYTE_TO_16_TABLE[value];
+}
+
+static constexpr auto REPLICATE_BIT_TO_7_TABLE = MakeReplicateTable<u32, 1, 7>();
+static constexpr u32 ReplicateBitTo7(std::size_t value) {
+ return REPLICATE_BIT_TO_7_TABLE[value];
+}
+
+static constexpr auto REPLICATE_BIT_TO_9_TABLE = MakeReplicateTable<u32, 1, 9>();
+static constexpr u32 ReplicateBitTo9(std::size_t value) {
+ return REPLICATE_BIT_TO_9_TABLE[value];
+}
+
+static constexpr auto REPLICATE_1_BIT_TO_8_TABLE = MakeReplicateTable<u32, 1, 8>();
+static constexpr auto REPLICATE_2_BIT_TO_8_TABLE = MakeReplicateTable<u32, 2, 8>();
+static constexpr auto REPLICATE_3_BIT_TO_8_TABLE = MakeReplicateTable<u32, 3, 8>();
+static constexpr auto REPLICATE_4_BIT_TO_8_TABLE = MakeReplicateTable<u32, 4, 8>();
+static constexpr auto REPLICATE_5_BIT_TO_8_TABLE = MakeReplicateTable<u32, 5, 8>();
+static constexpr auto REPLICATE_6_BIT_TO_8_TABLE = MakeReplicateTable<u32, 6, 8>();
+static constexpr auto REPLICATE_7_BIT_TO_8_TABLE = MakeReplicateTable<u32, 7, 8>();
+static constexpr auto REPLICATE_8_BIT_TO_8_TABLE = MakeReplicateTable<u32, 8, 8>();
+/// Uses a precompiled table for the most common bit widths; if the value falls outside the
+/// expected range, falls back to the runtime implementation.
+static constexpr u32 FastReplicateTo8(u32 value, u32 num_bits) {
+ switch (num_bits) {
+ case 1:
+ return REPLICATE_1_BIT_TO_8_TABLE[value];
+ case 2:
+ return REPLICATE_2_BIT_TO_8_TABLE[value];
+ case 3:
+ return REPLICATE_3_BIT_TO_8_TABLE[value];
+ case 4:
+ return REPLICATE_4_BIT_TO_8_TABLE[value];
+ case 5:
+ return REPLICATE_5_BIT_TO_8_TABLE[value];
+ case 6:
+ return REPLICATE_6_BIT_TO_8_TABLE[value];
+ case 7:
+ return REPLICATE_7_BIT_TO_8_TABLE[value];
+ case 8:
+ return REPLICATE_8_BIT_TO_8_TABLE[value];
+ default:
+ return Replicate(value, num_bits, 8);
+ }
+}
+
+static constexpr auto REPLICATE_1_BIT_TO_6_TABLE = MakeReplicateTable<u32, 1, 6>();
+static constexpr auto REPLICATE_2_BIT_TO_6_TABLE = MakeReplicateTable<u32, 2, 6>();
+static constexpr auto REPLICATE_3_BIT_TO_6_TABLE = MakeReplicateTable<u32, 3, 6>();
+static constexpr auto REPLICATE_4_BIT_TO_6_TABLE = MakeReplicateTable<u32, 4, 6>();
+static constexpr auto REPLICATE_5_BIT_TO_6_TABLE = MakeReplicateTable<u32, 5, 6>();
+static constexpr u32 FastReplicateTo6(u32 value, u32 num_bits) {
+ switch (num_bits) {
+ case 1:
+ return REPLICATE_1_BIT_TO_6_TABLE[value];
+ case 2:
+ return REPLICATE_2_BIT_TO_6_TABLE[value];
+ case 3:
+ return REPLICATE_3_BIT_TO_6_TABLE[value];
+ case 4:
+ return REPLICATE_4_BIT_TO_6_TABLE[value];
+ case 5:
+ return REPLICATE_5_BIT_TO_6_TABLE[value];
+ default:
+ return Replicate(value, num_bits, 6);
+ }
+}
+
class Pixel {
protected:
using ChannelType = s16;
@@ -674,10 +759,10 @@ public:
// significant bits when going from larger to smaller bit depth
// or by repeating the most significant bits when going from
// smaller to larger bit depths.
- void ChangeBitDepth(const u8 (&depth)[4]) {
+ void ChangeBitDepth() {
for (u32 i = 0; i < 4; i++) {
- Component(i) = ChangeBitDepth(Component(i), m_BitDepth[i], depth[i]);
- m_BitDepth[i] = depth[i];
+ Component(i) = ChangeBitDepth(Component(i), m_BitDepth[i]);
+ m_BitDepth[i] = 8;
}
}
@@ -689,28 +774,23 @@ public:
// Changes the bit depth of a single component. See the comment
// above for how we do this.
- static ChannelType ChangeBitDepth(Pixel::ChannelType val, u8 oldDepth, u8 newDepth) {
- assert(newDepth <= 8);
+ static ChannelType ChangeBitDepth(Pixel::ChannelType val, u8 oldDepth) {
assert(oldDepth <= 8);
- if (oldDepth == newDepth) {
+ if (oldDepth == 8) {
// Do nothing
return val;
- } else if (oldDepth == 0 && newDepth != 0) {
- return static_cast<ChannelType>((1 << newDepth) - 1);
- } else if (newDepth > oldDepth) {
- return Replicate(val, oldDepth, newDepth);
+ } else if (oldDepth == 0) {
+ return static_cast<ChannelType>((1 << 8) - 1);
+ } else if (8 > oldDepth) {
+ return static_cast<ChannelType>(FastReplicateTo8(static_cast<u32>(val), oldDepth));
} else {
// oldDepth > newDepth
- if (newDepth == 0) {
- return 0xFF;
- } else {
- u8 bitsWasted = static_cast<u8>(oldDepth - newDepth);
- u16 v = static_cast<u16>(val);
- v = static_cast<u16>((v + (1 << (bitsWasted - 1))) >> bitsWasted);
- v = ::std::min<u16>(::std::max<u16>(0, v), static_cast<u16>((1 << newDepth) - 1));
- return static_cast<u8>(v);
- }
+ const u8 bitsWasted = static_cast<u8>(oldDepth - 8);
+ u16 v = static_cast<u16>(val);
+ v = static_cast<u16>((v + (1 << (bitsWasted - 1))) >> bitsWasted);
+ v = ::std::min<u16>(::std::max<u16>(0, v), static_cast<u16>((1 << 8) - 1));
+ return static_cast<u8>(v);
}
assert(false && "We shouldn't get here.");
@@ -760,8 +840,7 @@ public:
// up in the most-significant byte.
u32 Pack() const {
Pixel eightBit(*this);
- const u8 eightBitDepth[4] = {8, 8, 8, 8};
- eightBit.ChangeBitDepth(eightBitDepth);
+ eightBit.ChangeBitDepth();
u32 r = 0;
r |= eightBit.A();
@@ -816,8 +895,7 @@ static void DecodeColorValues(u32* out, u8* data, const u32* modes, const u32 nP
}
// We now have enough to decode our integer sequence.
- std::vector<IntegerEncodedValue> decodedColorValues;
- decodedColorValues.reserve(32);
+ IntegerEncodedVector decodedColorValues;
InputBitStream colorStream(data);
DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues);
@@ -839,12 +917,12 @@ static void DecodeColorValues(u32* out, u8* data, const u32* modes, const u32 nP
u32 A = 0, B = 0, C = 0, D = 0;
// A is just the lsb replicated 9 times.
- A = Replicate(bitval & 1, 1, 9);
+ A = ReplicateBitTo9(bitval & 1);
switch (val.encoding) {
// Replicate bits
case IntegerEncoding::JustBits:
- out[outIdx++] = Replicate(bitval, bitlen, 8);
+ out[outIdx++] = FastReplicateTo8(bitval, bitlen);
break;
// Use algorithm in C.2.13
@@ -962,13 +1040,13 @@ static u32 UnquantizeTexelWeight(const IntegerEncodedValue& val) {
u32 bitval = val.bit_value;
u32 bitlen = val.num_bits;
- u32 A = Replicate(bitval & 1, 1, 7);
+ u32 A = ReplicateBitTo7(bitval & 1);
u32 B = 0, C = 0, D = 0;
u32 result = 0;
switch (val.encoding) {
case IntegerEncoding::JustBits:
- result = Replicate(bitval, bitlen, 6);
+ result = FastReplicateTo6(bitval, bitlen);
break;
case IntegerEncoding::Trit: {
@@ -1047,7 +1125,7 @@ static u32 UnquantizeTexelWeight(const IntegerEncodedValue& val) {
return result;
}
-static void UnquantizeTexelWeights(u32 out[2][144], const std::vector<IntegerEncodedValue>& weights,
+static void UnquantizeTexelWeights(u32 out[2][144], const IntegerEncodedVector& weights,
const TexelWeightParams& params, const u32 blockWidth,
const u32 blockHeight) {
u32 weightIdx = 0;
@@ -1545,8 +1623,7 @@ static void DecompressBlock(const u8 inBuf[16], const u32 blockWidth, const u32
static_cast<u8>((1 << (weightParams.GetPackedBitSize() % 8)) - 1);
memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart);
- std::vector<IntegerEncodedValue> texelWeightValues;
- texelWeightValues.reserve(64);
+ IntegerEncodedVector texelWeightValues;
InputBitStream weightStream(texelWeightData);
@@ -1568,9 +1645,9 @@ static void DecompressBlock(const u8 inBuf[16], const u32 blockWidth, const u32
Pixel p;
for (u32 c = 0; c < 4; c++) {
u32 C0 = endpos32s[partition][0].Component(c);
- C0 = Replicate(C0, 8, 16);
+ C0 = ReplicateByteTo16(C0);
u32 C1 = endpos32s[partition][1].Component(c);
- C1 = Replicate(C1, 8, 16);
+ C1 = ReplicateByteTo16(C1);
u32 plane = 0;
if (weightParams.m_bDualPlane && (((planeIdx + 1) & 3) == c)) {
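
The REPLICATE_*_TABLE constants introduced above trade the bit-replication loop for constexpr lookup tables indexed by the raw value. A standalone sketch of the same trick under illustrative names; ReplicateTo8 and MakeReplicateTo8Table are not the real helpers:

#include <array>
#include <cstdint>
#include <cstdio>

// Replicate the low `num_bits` of `val` across 8 bits, as ASTC decoding does
// (e.g. 0b101 -> 0b10110110).
static constexpr std::uint32_t ReplicateTo8(std::uint32_t val, int num_bits) {
    const std::uint32_t v = val & ((1u << num_bits) - 1u);
    std::uint32_t result = 0;
    for (int shift = 8 - num_bits; shift > -num_bits; shift -= num_bits) {
        result |= shift >= 0 ? (v << shift) : (v >> -shift);
    }
    return result;
}

// Build the whole table at compile time, one entry per possible raw value.
template <int NumBits>
static constexpr auto MakeReplicateTo8Table() {
    std::array<std::uint32_t, std::size_t{1} << NumBits> table{};
    for (std::uint32_t v = 0; v < table.size(); ++v) {
        table[v] = ReplicateTo8(v, NumBits);
    }
    return table;
}

static constexpr auto REPLICATE_3_TO_8 = MakeReplicateTo8Table<3>();
static_assert(REPLICATE_3_TO_8[0b101] == 0b10110110);
static_assert(MakeReplicateTo8Table<1>()[1] == 0xFF);

int main() {
    std::printf("%02X\n", REPLICATE_3_TO_8[0b101]); // B6
    return 0;
}
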
diff --git a/src/video_core/textures/texture.cpp b/src/video_core/textures/texture.cpp
new file mode 100644
index 000000000..d1939d744
--- /dev/null
+++ b/src/video_core/textures/texture.cpp
@@ -0,0 +1,80 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+
+#include "core/settings.h"
+#include "video_core/textures/texture.h"
+
+namespace Tegra::Texture {
+
+namespace {
+
+constexpr std::array<float, 256> SRGB_CONVERSION_LUT = {
+ 0.000000f, 0.000000f, 0.000000f, 0.000012f, 0.000021f, 0.000033f, 0.000046f, 0.000062f,
+ 0.000081f, 0.000102f, 0.000125f, 0.000151f, 0.000181f, 0.000214f, 0.000251f, 0.000293f,
+ 0.000338f, 0.000388f, 0.000443f, 0.000503f, 0.000568f, 0.000639f, 0.000715f, 0.000798f,
+ 0.000887f, 0.000983f, 0.001085f, 0.001195f, 0.001312f, 0.001437f, 0.001569f, 0.001710f,
+ 0.001860f, 0.002019f, 0.002186f, 0.002364f, 0.002551f, 0.002748f, 0.002955f, 0.003174f,
+ 0.003403f, 0.003643f, 0.003896f, 0.004160f, 0.004436f, 0.004725f, 0.005028f, 0.005343f,
+ 0.005672f, 0.006015f, 0.006372f, 0.006744f, 0.007130f, 0.007533f, 0.007950f, 0.008384f,
+ 0.008834f, 0.009301f, 0.009785f, 0.010286f, 0.010805f, 0.011342f, 0.011898f, 0.012472f,
+ 0.013066f, 0.013680f, 0.014313f, 0.014967f, 0.015641f, 0.016337f, 0.017054f, 0.017793f,
+ 0.018554f, 0.019337f, 0.020144f, 0.020974f, 0.021828f, 0.022706f, 0.023609f, 0.024536f,
+ 0.025489f, 0.026468f, 0.027473f, 0.028504f, 0.029563f, 0.030649f, 0.031762f, 0.032904f,
+ 0.034074f, 0.035274f, 0.036503f, 0.037762f, 0.039050f, 0.040370f, 0.041721f, 0.043103f,
+ 0.044518f, 0.045964f, 0.047444f, 0.048956f, 0.050503f, 0.052083f, 0.053699f, 0.055349f,
+ 0.057034f, 0.058755f, 0.060513f, 0.062307f, 0.064139f, 0.066008f, 0.067915f, 0.069861f,
+ 0.071845f, 0.073869f, 0.075933f, 0.078037f, 0.080182f, 0.082369f, 0.084597f, 0.086867f,
+ 0.089180f, 0.091535f, 0.093935f, 0.096378f, 0.098866f, 0.101398f, 0.103977f, 0.106601f,
+ 0.109271f, 0.111988f, 0.114753f, 0.117565f, 0.120426f, 0.123335f, 0.126293f, 0.129301f,
+ 0.132360f, 0.135469f, 0.138629f, 0.141841f, 0.145105f, 0.148421f, 0.151791f, 0.155214f,
+ 0.158691f, 0.162224f, 0.165810f, 0.169453f, 0.173152f, 0.176907f, 0.180720f, 0.184589f,
+ 0.188517f, 0.192504f, 0.196549f, 0.200655f, 0.204820f, 0.209046f, 0.213334f, 0.217682f,
+ 0.222093f, 0.226567f, 0.231104f, 0.235704f, 0.240369f, 0.245099f, 0.249894f, 0.254754f,
+ 0.259681f, 0.264674f, 0.269736f, 0.274864f, 0.280062f, 0.285328f, 0.290664f, 0.296070f,
+ 0.301546f, 0.307094f, 0.312713f, 0.318404f, 0.324168f, 0.330006f, 0.335916f, 0.341902f,
+ 0.347962f, 0.354097f, 0.360309f, 0.366597f, 0.372961f, 0.379403f, 0.385924f, 0.392524f,
+ 0.399202f, 0.405960f, 0.412798f, 0.419718f, 0.426719f, 0.433802f, 0.440967f, 0.448216f,
+ 0.455548f, 0.462965f, 0.470465f, 0.478052f, 0.485725f, 0.493484f, 0.501329f, 0.509263f,
+ 0.517285f, 0.525396f, 0.533595f, 0.541885f, 0.550265f, 0.558736f, 0.567299f, 0.575954f,
+ 0.584702f, 0.593542f, 0.602477f, 0.611507f, 0.620632f, 0.629852f, 0.639168f, 0.648581f,
+ 0.658092f, 0.667700f, 0.677408f, 0.687214f, 0.697120f, 0.707127f, 0.717234f, 0.727443f,
+ 0.737753f, 0.748167f, 0.758685f, 0.769305f, 0.780031f, 0.790861f, 0.801798f, 0.812839f,
+ 0.823989f, 0.835246f, 0.846611f, 0.858085f, 0.869668f, 0.881360f, 0.893164f, 0.905078f,
+ 0.917104f, 0.929242f, 0.941493f, 0.953859f, 0.966338f, 1.000000f, 1.000000f, 1.000000f,
+};
+
+unsigned SettingsMinimumAnisotropy() noexcept {
+ switch (static_cast<Anisotropy>(Settings::values.max_anisotropy)) {
+ default:
+ case Anisotropy::Default:
+ return 1U;
+ case Anisotropy::Filter2x:
+ return 2U;
+ case Anisotropy::Filter4x:
+ return 4U;
+ case Anisotropy::Filter8x:
+ return 8U;
+ case Anisotropy::Filter16x:
+ return 16U;
+ }
+}
+
+} // Anonymous namespace
+
+std::array<float, 4> TSCEntry::GetBorderColor() const noexcept {
+ if (!srgb_conversion) {
+ return border_color;
+ }
+ return {SRGB_CONVERSION_LUT[srgb_border_color_r], SRGB_CONVERSION_LUT[srgb_border_color_g],
+ SRGB_CONVERSION_LUT[srgb_border_color_b], border_color[3]};
+}
+
+float TSCEntry::GetMaxAnisotropy() const noexcept {
+ return static_cast<float>(std::max(1U << max_anisotropy, SettingsMinimumAnisotropy()));
+}
+
+} // namespace Tegra::Texture
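
GetMaxAnisotropy combines the sampler's log2-encoded anisotropy field with the user setting acting as a floor. In miniature; EffectiveMaxAnisotropy is an illustrative stand-in for the pair of functions above:

#include <algorithm>
#include <cstdio>

// The TSC stores max anisotropy as a log2 exponent; the setting's minimum
// wins whenever it is larger than what the sampler requests.
static float EffectiveMaxAnisotropy(unsigned log2_aniso, unsigned settings_min) {
    return static_cast<float>(std::max(1u << log2_aniso, settings_min));
}

int main() {
    std::printf("%.0fx\n", EffectiveMaxAnisotropy(0, 16)); // setting forces 16x
    std::printf("%.0fx\n", EffectiveMaxAnisotropy(3, 2));  // sampler already asks for 8x
    return 0;
}
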
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index 7edc4abe1..eba05aced 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -8,7 +8,6 @@
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_types.h"
-#include "core/settings.h"
namespace Tegra::Texture {
@@ -132,6 +131,20 @@ enum class SwizzleSource : u32 {
OneFloat = 7,
};
+enum class MsaaMode : u32 {
+ Msaa1x1 = 0,
+ Msaa2x1 = 1,
+ Msaa2x2 = 2,
+ Msaa4x2 = 3,
+ Msaa4x2_D3D = 4,
+ Msaa2x1_D3D = 5,
+ Msaa4x4 = 6,
+ Msaa2x2_VC4 = 8,
+ Msaa2x2_VC12 = 9,
+ Msaa4x2_VC8 = 10,
+ Msaa4x2_VC24 = 11,
+};
+
union TextureHandle {
TextureHandle(u32 raw) : raw{raw} {}
@@ -198,6 +211,7 @@ struct TICEntry {
union {
BitField<0, 4, u32> res_min_mip_level;
BitField<4, 4, u32> res_max_mip_level;
+ BitField<8, 4, MsaaMode> msaa_mode;
BitField<12, 12, u32> min_lod_clamp;
};
@@ -336,24 +350,9 @@ struct TSCEntry {
std::array<u8, 0x20> raw;
};
- float GetMaxAnisotropy() const {
- const u32 min_value = [] {
- switch (static_cast<Anisotropy>(Settings::values.max_anisotropy)) {
- default:
- case Anisotropy::Default:
- return 1U;
- case Anisotropy::Filter2x:
- return 2U;
- case Anisotropy::Filter4x:
- return 4U;
- case Anisotropy::Filter8x:
- return 8U;
- case Anisotropy::Filter16x:
- return 16U;
- }
- }();
- return static_cast<float>(std::max(1U << max_anisotropy, min_value));
- }
+ std::array<float, 4> GetBorderColor() const noexcept;
+
+ float GetMaxAnisotropy() const noexcept;
float GetMinLod() const {
return static_cast<float>(min_lod_clamp) / 256.0f;
@@ -368,15 +367,6 @@ struct TSCEntry {
constexpr u32 mask = 1U << (13 - 1);
return static_cast<float>(static_cast<s32>((mip_lod_bias ^ mask) - mask)) / 256.0f;
}
-
- std::array<float, 4> GetBorderColor() const {
- if (srgb_conversion) {
- return {static_cast<float>(srgb_border_color_r) / 255.0f,
- static_cast<float>(srgb_border_color_g) / 255.0f,
- static_cast<float>(srgb_border_color_b) / 255.0f, border_color[3]};
- }
- return border_color;
- }
};
static_assert(sizeof(TSCEntry) == 0x20, "TSCEntry has wrong size");
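
The GetMipLodBias body kept above uses the xor/subtract trick to sign-extend a 13-bit two's-complement field before scaling by 1/256. Isolated, with an assumed name:

#include <cstdint>
#include <cstdio>

// Sign-extend a 13-bit field: flipping the sign bit and subtracting it back
// maps [0, 2^13) onto [-2^12, 2^12). The /256 gives 8 fractional fixed-point bits.
static float DecodeLodBias(std::uint32_t raw13) {
    constexpr std::uint32_t mask = 1u << (13 - 1); // sign bit of the field
    const std::int32_t extended = static_cast<std::int32_t>((raw13 ^ mask) - mask);
    return static_cast<float>(extended) / 256.0f;
}

int main() {
    std::printf("%f\n", DecodeLodBias(0x0100)); //  1.000000
    std::printf("%f\n", DecodeLodBias(0x1F00)); // -1.000000 (0x1F00 is -256 in 13 bits)
    return 0;
}
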