Diffstat (limited to 'src/video_core')
src/video_core/buffer_cache/buffer_base.h | 9
src/video_core/buffer_cache/buffer_cache.h | 176
src/video_core/buffer_cache/buffer_cache_base.h | 10
src/video_core/buffer_cache/memory_tracker_base.h | 26
src/video_core/buffer_cache/word_manager.h | 27
src/video_core/engines/maxwell_dma.cpp | 8
src/video_core/engines/sw_blitter/blitter.cpp | 25
src/video_core/fence_manager.h | 5
src/video_core/gpu.cpp | 19
src/video_core/gpu.h | 4
src/video_core/host1x/codecs/h264.cpp | 4
src/video_core/query_cache.h | 2
src/video_core/rasterizer_download_area.h | 16
src/video_core/rasterizer_interface.h | 3
src/video_core/renderer_null/null_rasterizer.cpp | 10
src/video_core/renderer_null/null_rasterizer.h | 1
src/video_core/renderer_opengl/gl_device.cpp | 3
src/video_core/renderer_opengl/gl_rasterizer.cpp | 25
src/video_core/renderer_opengl/gl_rasterizer.h | 1
src/video_core/renderer_opengl/gl_texture_cache.cpp | 11
src/video_core/renderer_opengl/gl_texture_cache.h | 1
src/video_core/renderer_vulkan/pipeline_helper.h | 6
src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 10
src/video_core/renderer_vulkan/vk_buffer_cache.h | 9
src/video_core/renderer_vulkan/vk_compute_pass.cpp | 39
src/video_core/renderer_vulkan/vk_compute_pass.h | 14
src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 12
src/video_core/renderer_vulkan/vk_compute_pipeline.h | 4
src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 10
src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 5
src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 8
src/video_core/renderer_vulkan/vk_pipeline_cache.h | 5
src/video_core/renderer_vulkan/vk_rasterizer.cpp | 40
src/video_core/renderer_vulkan/vk_rasterizer.h | 4
src/video_core/renderer_vulkan/vk_texture_cache.cpp | 16
src/video_core/renderer_vulkan/vk_texture_cache.h | 7
src/video_core/renderer_vulkan/vk_update_descriptor.h | 6
src/video_core/texture_cache/formatter.cpp | 22
src/video_core/texture_cache/formatter.h | 2
src/video_core/texture_cache/image_info.cpp | 12
src/video_core/texture_cache/image_info.h | 2
src/video_core/texture_cache/image_view_base.cpp | 12
src/video_core/texture_cache/image_view_base.h | 7
src/video_core/texture_cache/texture_cache.h | 53
src/video_core/texture_cache/texture_cache_base.h | 4
src/video_core/texture_cache/util.cpp | 10
src/video_core/vulkan_common/vulkan_device.cpp | 2
src/video_core/vulkan_common/vulkan_device.h | 3
src/video_core/vulkan_common/vulkan_memory_allocator.cpp | 2
49 files changed, 461 insertions, 251 deletions
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 9cbd95c4b..0bb3bf8ae 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -18,6 +18,7 @@ namespace VideoCommon {
enum class BufferFlagBits {
Picked = 1 << 0,
CachedWrites = 1 << 1,
+ PreemtiveDownload = 1 << 2,
};
DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
@@ -54,6 +55,10 @@ public:
flags |= BufferFlagBits::Picked;
}
+ void MarkPreemtiveDownload() noexcept {
+ flags |= BufferFlagBits::PreemtiveDownload;
+ }
+
/// Unmark buffer as picked
void Unpick() noexcept {
flags &= ~BufferFlagBits::Picked;
@@ -84,6 +89,10 @@ public:
return True(flags & BufferFlagBits::CachedWrites);
}
+ bool IsPreemtiveDownload() const noexcept {
+ return True(flags & BufferFlagBits::PreemtiveDownload);
+ }
+
/// Returns the base CPU address of the buffer
[[nodiscard]] VAddr CpuAddr() const noexcept {
return cpu_addr;
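For reference, the flag pattern used here: DECLARE_ENUM_FLAG_OPERATORS generates bitwise operators for the enum class, and True() tests the result for a non-zero value. A minimal self-contained sketch of the same pattern (the operators below are simplified stand-ins for the macro expansion in common/common_funcs.h, not the real one):

    #include <cstdint>

    enum class BufferFlagBits : uint32_t {
        Picked = 1 << 0,
        CachedWrites = 1 << 1,
        PreemtiveDownload = 1 << 2,
    };

    // Stand-ins for what DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits) provides.
    constexpr BufferFlagBits operator|(BufferFlagBits a, BufferFlagBits b) {
        return static_cast<BufferFlagBits>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
    }
    constexpr BufferFlagBits operator&(BufferFlagBits a, BufferFlagBits b) {
        return static_cast<BufferFlagBits>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
    }
    constexpr BufferFlagBits& operator|=(BufferFlagBits& a, BufferFlagBits b) { return a = a | b; }
    constexpr bool True(BufferFlagBits f) { return static_cast<uint32_t>(f) != 0; }

    struct BufferSketch {
        BufferFlagBits flags{};
        void MarkPreemtiveDownload() noexcept { flags |= BufferFlagBits::PreemtiveDownload; }
        bool IsPreemtiveDownload() const noexcept {
            return True(flags & BufferFlagBits::PreemtiveDownload);
        }
    };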
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..98756e4da 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
common_ranges.clear();
inline_buffer_id = NULL_BUFFER_ID;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
if (!runtime.CanReportMemoryUsage()) {
minimum_memory = DEFAULT_EXPECTED_MEMORY;
critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
uniform_cache_hits[0] = 0;
uniform_cache_shots[0] = 0;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
const bool skip_preferred = hits * 256 < shots * 251;
uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
@@ -100,51 +96,50 @@ void BufferCache<P>::TickFrame() {
template <class P>
void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
- memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) {
const IntervalType subtract_interval{cpu_addr, cpu_addr + size};
ClearDownload(subtract_interval);
common_ranges.subtract(subtract_interval);
}
+ memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
}
template <class P>
void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
memory_tracker.CachedCpuWrite(cpu_addr, size);
- const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
- Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
- cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ area.emplace();
+ VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+ VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+ area->start_address = cpu_addr_start_aligned;
+ area->end_address = cpu_addr_end_aligned;
+ if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+ area->preemtive = true;
+ return area;
+    }
+ area->preemtive =
+ !IsRegionGpuModified(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+ cpu_addr_end_aligned - cpu_addr_start_aligned);
+ return area;
}
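GetFlushArea widens the request to whole guest pages before reporting it back, and only consults IsRegionGpuModified when the range has not already been marked preflushable. A small worked sketch of the page rounding, assuming a 4 KiB Core::Memory::YUZU_PAGESIZE and the AlignDown/AlignUp semantics of common/alignment.h:

    #include <cstdint>

    constexpr uint64_t PAGE = 0x1000; // assumed YUZU_PAGESIZE

    constexpr uint64_t AlignDown(uint64_t value, uint64_t size) {
        return value & ~(size - 1); // size must be a power of two
    }
    constexpr uint64_t AlignUp(uint64_t value, uint64_t size) {
        return AlignDown(value + size - 1, size);
    }

    // A 16-byte read at 0x1234 is widened to the full page [0x1000, 0x2000).
    static_assert(AlignDown(0x1234, PAGE) == 0x1000);
    static_assert(AlignUp(0x1234 + 0x10, PAGE) == 0x2000);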
template <class P>
void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
- WaitOnAsyncFlushes(cpu_addr, size);
ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) {
DownloadBufferMemory(buffer, cpu_addr, size);
});
}
template <class P>
-void BufferCache<P>::WaitOnAsyncFlushes(VAddr cpu_addr, u64 size) {
- bool must_wait = false;
- ForEachInOverlapCounter(async_downloads, cpu_addr, size,
- [&](VAddr, VAddr, int) { must_wait = true; });
- bool must_release = false;
- ForEachInRangeSet(pending_ranges, cpu_addr, size, [&](VAddr, VAddr) { must_release = true; });
- if (must_release) {
- std::function<void()> tmp([]() {});
- rasterizer.SignalFence(std::move(tmp));
- }
- if (must_wait || must_release) {
- rasterizer.ReleaseFences();
- }
-}
-
-template <class P>
void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1024);
uncommitted_ranges.subtract(subtract_interval);
- pending_ranges.subtract(subtract_interval);
for (auto& interval_set : committed_ranges) {
interval_set.subtract(subtract_interval);
}
@@ -164,7 +159,6 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
}
const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
- WaitOnAsyncFlushes(*cpu_src_address, static_cast<u32>(amount));
ClearDownload(subtract_interval);
BufferId buffer_a;
@@ -192,7 +186,6 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -205,7 +198,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
if (has_new_downloads) {
memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
}
- std::vector<u8> tmp_buffer(amount);
+ tmp_buffer.resize(amount);
cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
return true;
@@ -441,9 +434,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
template <class P>
void BufferCache<P>::FlushCachedWrites() {
- cached_write_buffer_ids.clear();
memory_tracker.FlushCachedWrites();
- cached_ranges.clear();
}
template <class P>
@@ -474,15 +465,13 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
if (committed_ranges.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
MICROPROFILE_SCOPE(GPU_DownloadMemory);
- pending_ranges.clear();
auto it = committed_ranges.begin();
while (it != committed_ranges.end()) {
auto& current_intervals = *it;
@@ -537,64 +526,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
committed_ranges.clear();
if (downloads.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
- if (active_async_buffers) {
- if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
- boost::container::small_vector<BufferCopy, 4> normalized_copies;
- IntervalSet new_async_range{};
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- BufferCopy second_copy{copy};
- Buffer& buffer = slot_buffers[buffer_id];
- second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
- VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
- const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
- async_downloads += std::make_pair(base_interval, 1);
- runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
- normalized_copies.push_back(second_copy);
- }
- runtime.PostCopyBarrier();
- pending_downloads.emplace_back(std::move(normalized_copies));
- async_buffers.emplace_back(download_staging);
- } else {
- committed_ranges.clear();
- uncommitted_ranges.clear();
+ if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+ boost::container::small_vector<BufferCopy, 4> normalized_copies;
+ IntervalSet new_async_range{};
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ BufferCopy second_copy{copy};
+ Buffer& buffer = slot_buffers[buffer_id];
+ second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
+ VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+ const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+ async_downloads += std::make_pair(base_interval, 1);
+ runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+ normalized_copies.push_back(second_copy);
}
+ runtime.PostCopyBarrier();
+ pending_downloads.emplace_back(std::move(normalized_copies));
+ async_buffers.emplace_back(download_staging);
} else {
- if constexpr (USE_MEMORY_MAPS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- // Have in mind the staging buffer offset for the copy
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
- }
- runtime.PostCopyBarrier();
- runtime.Finish();
- for (const auto& [copy, buffer_id] : downloads) {
- const Buffer& buffer = slot_buffers[buffer_id];
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- // Undo the modified offset
- const u64 dst_offset = copy.dst_offset - download_staging.offset;
- const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
- }
+ if (!Settings::IsGPULevelHigh()) {
+ committed_ranges.clear();
+ uncommitted_ranges.clear();
} else {
- const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
- for (const auto& [copy, buffer_id] : downloads) {
- Buffer& buffer = slot_buffers[buffer_id];
- buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ if constexpr (USE_MEMORY_MAPS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ // Have in mind the staging buffer offset for the copy
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+ false);
+ }
+ runtime.PostCopyBarrier();
+ runtime.Finish();
+ for (const auto& [copy, buffer_id] : downloads) {
+ const Buffer& buffer = slot_buffers[buffer_id];
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ // Undo the modified offset
+ const u64 dst_offset = copy.dst_offset - download_staging.offset;
+ const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+ }
+ } else {
+ const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+ for (const auto& [copy, buffer_id] : downloads) {
+ Buffer& buffer = slot_buffers[buffer_id];
+ buffer.ImmediateDownload(copy.src_offset,
+ immediate_buffer.subspan(0, copy.size));
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ }
}
}
}
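The staging-offset arithmetic in this hunk is symmetric: each copy's dst_offset is rebased onto the staging allocation before runtime.CopyBuffer(), and rebased back before indexing mapped_span for the CPU write-back. A worked sketch with made-up numbers:

    #include <cstdint>

    struct BufferCopySketch {
        uint64_t src_offset;
        uint64_t dst_offset;
        uint64_t size;
    };

    constexpr uint64_t staging_offset = 0x100; // assumed download_staging.offset
    constexpr BufferCopySketch copy{.src_offset = 0x40, .dst_offset = 0x0, .size = 0x20};
    // Before the GPU copy: rebase onto the staging allocation.
    constexpr uint64_t gpu_dst = copy.dst_offset + staging_offset; // 0x100
    // After runtime.Finish(): undo the rebase to index the mapped span.
    constexpr uint64_t mapped_index = gpu_dst - staging_offset;
    static_assert(mapped_index == copy.dst_offset);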
@@ -1213,16 +1203,14 @@ void BufferCache<P>::UpdateComputeTextureBuffers() {
template <class P>
void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) {
- memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
-
if (memory_tracker.IsRegionCpuModified(cpu_addr, size)) {
SynchronizeBuffer(slot_buffers[buffer_id], cpu_addr, size);
}
+ memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
uncommitted_ranges.add(base_interval);
- pending_ranges.add(base_interval);
}
template <class P>
@@ -1629,7 +1617,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
replace(transform_feedback_buffers);
replace(compute_uniform_buffers);
replace(compute_storage_buffers);
- std::erase(cached_write_buffer_ids, buffer_id);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
@@ -1668,14 +1655,15 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
const bool is_nvn_cbuf = cbuf_index == 0;
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
if (is_nvn_cbuf) {
- return gpu_memory->Read<u32>(ssbo_addr + 8);
+ const u32 ssbo_size = gpu_memory->Read<u32>(ssbo_addr + 8);
+ if (ssbo_size != 0) {
+ return ssbo_size;
+ }
}
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom defined
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
- LOG_INFO(HW_GPU, "Binding storage buffer for cbuf index {}, MemoryLayoutSize 0x{:X}",
- cbuf_index, memory_layout_size);
return memory_layout_size;
}();
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
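A hedged sketch of the layout this code assumes: the NVN driver packs a 64-bit SSBO GPU address followed by a 32-bit size into constant buffer 0, and the change above now falls back to the mapped buffer size whenever that packed size reads as zero. The struct and helper names below are illustrative, not yuzu's API:

    #include <cstdint>

    struct NvnSsboDescriptor {
        uint64_t gpu_addr; // bytes 0..7 of the cbuf entry
        uint32_t size;     // bytes 8..11, i.e. gpu_memory->Read<u32>(ssbo_addr + 8)
    };

    uint32_t PickSsboSize(const NvnSsboDescriptor& desc, uint32_t memory_layout_size) {
        // Zero-sized NVN descriptors now fall back to the fully mapped size.
        return desc.size != 0 ? desc.size : memory_layout_size;
    }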
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 656baa550..ac00d4d9d 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -188,6 +188,8 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
+ std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -379,8 +381,6 @@ private:
void RunGarbageCollector();
- void WaitOnAsyncFlushes(VAddr cpu_addr, u64 size);
-
void BindHostIndexBuffer();
void BindHostVertexBuffers();
@@ -541,13 +541,10 @@ private:
std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
uniform_buffer_binding_sizes{};
- std::vector<BufferId> cached_write_buffer_ids;
-
MemoryTracker memory_tracker;
IntervalSet uncommitted_ranges;
IntervalSet common_ranges;
IntervalSet cached_ranges;
- IntervalSet pending_ranges;
std::deque<IntervalSet> committed_ranges;
// Async Buffers
@@ -572,9 +569,8 @@ private:
u64 critical_memory = 0;
BufferId inline_buffer_id;
- bool active_async_buffers = false;
-
std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+ std::vector<u8> tmp_buffer;
};
} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index dc4ebfcaa..6036b21c9 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -66,6 +66,14 @@ public:
});
}
+ /// Returns true if a region has been marked as Preflushable
+ [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+ return IteratePages<false>(
+ query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+ return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+ });
+ }
+
/// Mark region as CPU modified, notifying the rasterizer about this change
void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
IteratePages<true>(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
});
}
+ /// Mark region as preflushable
+ void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages<true>(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, true>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Unmark region as modified from the host GPU
void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
IteratePages<true>(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
});
}
+ /// Unmark region as preflushable
+ void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages<true>(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, false>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Mark region as modified from the CPU
/// but don't mark it as modified until FlushCachedWrites is called.
void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a42455045..a336bde41 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -26,6 +26,7 @@ enum class Type {
GPU,
CachedCPU,
Untracked,
+ Preflushable,
};
/// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
gpu.stack.fill(0);
cached_cpu.stack.fill(0);
untracked.stack.fill(~u64{0});
+ preflushable.stack.fill(0);
} else {
// Share allocation between CPU and GPU pages and set their default values
- u64* const alloc = new u64[num_words * 4];
+ u64* const alloc = new u64[num_words * 5];
cpu.heap = alloc;
gpu.heap = alloc + num_words;
cached_cpu.heap = alloc + num_words * 2;
untracked.heap = alloc + num_words * 3;
+ preflushable.heap = alloc + num_words * 4;
std::fill_n(cpu.heap, num_words, ~u64{0});
std::fill_n(gpu.heap, num_words, 0);
std::fill_n(cached_cpu.heap, num_words, 0);
std::fill_n(untracked.heap, num_words, ~u64{0});
+ std::fill_n(preflushable.heap, num_words, 0);
}
// Clean up trailing bits
const u64 last_word_size = size_bytes % BYTES_PER_WORD;
@@ -88,13 +92,14 @@ struct Words {
gpu = rhs.gpu;
cached_cpu = rhs.cached_cpu;
untracked = rhs.untracked;
+ preflushable = rhs.preflushable;
rhs.cpu.heap = nullptr;
return *this;
}
Words(Words&& rhs) noexcept
: size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
- cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+ cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
rhs.cpu.heap = nullptr;
}
@@ -129,6 +134,8 @@ struct Words {
return std::span<u64>(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span<u64>(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span<u64>(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -142,6 +149,8 @@ struct Words {
return std::span<const u64>(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span<const u64>(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span<const u64>(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -151,6 +160,7 @@ struct Words {
WordsArray<stack_words> gpu;
WordsArray<stack_words> cached_cpu;
WordsArray<stack_words> untracked;
+ WordsArray<stack_words> preflushable;
};
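A minimal sketch of the shared allocation above: all five page bitmaps are carved out of one new[] block, which is why adding the preflushable set bumps the multiplier from 4 to 5. Names are simplified from word_manager.h, and copy/move handling is omitted:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct WordsSketch {
        size_t num_words;
        uint64_t* cpu;
        uint64_t* gpu;
        uint64_t* cached_cpu;
        uint64_t* untracked;
        uint64_t* preflushable;

        explicit WordsSketch(size_t num_words_) : num_words{num_words_} {
            uint64_t* const alloc = new uint64_t[num_words * 5];
            cpu = alloc;
            gpu = alloc + num_words;
            cached_cpu = alloc + num_words * 2;
            untracked = alloc + num_words * 3;
            preflushable = alloc + num_words * 4;
            std::fill_n(cpu, num_words, ~uint64_t{0});       // all pages CPU-dirty
            std::fill_n(gpu, num_words, uint64_t{0});
            std::fill_n(cached_cpu, num_words, uint64_t{0});
            std::fill_n(untracked, num_words, ~uint64_t{0}); // all pages untracked
            std::fill_n(preflushable, num_words, uint64_t{0});
        }
        ~WordsSketch() { delete[] cpu; } // cpu points at the start of the block
    };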
template <class RasterizerInterface, size_t stack_words = 1>
@@ -292,6 +302,9 @@ public:
(pending_pointer - pending_offset) * BYTES_PER_PAGE);
};
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if constexpr (clear) {
if constexpr (type == Type::CPU || type == Type::CachedCPU) {
@@ -340,8 +353,13 @@ public:
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
bool result = false;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word != 0) {
result = true;
@@ -362,9 +380,14 @@ public:
[[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
u64 begin = std::numeric_limits<u64>::max();
u64 end = 0;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word == 0) {
return;
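The three hunks above apply the same guard: when querying GPU-modified state, pages that are also marked untracked are masked out before the bitmap is inspected. A tiny worked example of the masking:

    #include <cstdint>

    constexpr uint64_t gpu_modified = 0b1011; // pages 0, 1 and 3 GPU-written
    constexpr uint64_t untracked = 0b0010;    // page 1 is untracked
    constexpr uint64_t effective = gpu_modified & ~untracked;
    static_assert(effective == 0b1001, "untracked pages no longer report as GPU-modified");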
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index e68850dc5..ebe5536de 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -223,7 +223,7 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(src_operand.address, read_buffer.data(), src_size);
- memory_manager.ReadBlockUnsafe(dst_operand.address, write_buffer.data(), dst_size);
+ memory_manager.ReadBlock(dst_operand.address, write_buffer.data(), dst_size);
UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
@@ -288,11 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
- if (Settings::IsGPULevelExtreme()) {
- memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
- } else {
- memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
- }
+ memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
// If the input is linear and the output is tiled, swizzle the input and copy it over.
SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
diff --git a/src/video_core/engines/sw_blitter/blitter.cpp b/src/video_core/engines/sw_blitter/blitter.cpp
index 3c9f38559..ff88cd03d 100644
--- a/src/video_core/engines/sw_blitter/blitter.cpp
+++ b/src/video_core/engines/sw_blitter/blitter.cpp
@@ -5,6 +5,7 @@
#include <cmath>
#include <vector>
+#include "common/scratch_buffer.h"
#include "video_core/engines/sw_blitter/blitter.h"
#include "video_core/engines/sw_blitter/converter.h"
#include "video_core/memory_manager.h"
@@ -112,11 +113,11 @@ void Bilinear(std::span<const f32> input, std::span<f32> output, size_t src_widt
} // namespace
struct SoftwareBlitEngine::BlitEngineImpl {
- std::vector<u8> tmp_buffer;
- std::vector<u8> src_buffer;
- std::vector<u8> dst_buffer;
- std::vector<f32> intermediate_src;
- std::vector<f32> intermediate_dst;
+ Common::ScratchBuffer<u8> tmp_buffer;
+ Common::ScratchBuffer<u8> src_buffer;
+ Common::ScratchBuffer<u8> dst_buffer;
+ Common::ScratchBuffer<f32> intermediate_src;
+ Common::ScratchBuffer<f32> intermediate_dst;
ConverterFactory converter_factory;
};
@@ -158,14 +159,14 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
const auto src_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(src.format));
const auto dst_bytes_per_pixel = BytesPerBlock(PixelFormatFromRenderTargetFormat(dst.format));
const size_t src_size = get_surface_size(src, src_bytes_per_pixel);
- impl->tmp_buffer.resize(src_size);
+ impl->tmp_buffer.resize_destructive(src_size);
memory_manager.ReadBlock(src.Address(), impl->tmp_buffer.data(), src_size);
const size_t src_copy_size = src_extent_x * src_extent_y * src_bytes_per_pixel;
const size_t dst_copy_size = dst_extent_x * dst_extent_y * dst_bytes_per_pixel;
- impl->src_buffer.resize(src_copy_size);
+ impl->src_buffer.resize_destructive(src_copy_size);
const bool no_passthrough =
src.format != dst.format || src_extent_x != dst_extent_x || src_extent_y != dst_extent_y;
@@ -177,8 +178,10 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
const auto convertion_phase_ir = [&]() {
auto* input_converter = impl->converter_factory.GetFormatConverter(src.format);
- impl->intermediate_src.resize((src_copy_size / src_bytes_per_pixel) * ir_components);
- impl->intermediate_dst.resize((dst_copy_size / dst_bytes_per_pixel) * ir_components);
+ impl->intermediate_src.resize_destructive((src_copy_size / src_bytes_per_pixel) *
+ ir_components);
+ impl->intermediate_dst.resize_destructive((dst_copy_size / dst_bytes_per_pixel) *
+ ir_components);
input_converter->ConvertTo(impl->src_buffer, impl->intermediate_src);
if (config.filter != Fermi2D::Filter::Bilinear) {
@@ -195,7 +198,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
// Do actual Blit
- impl->dst_buffer.resize(dst_copy_size);
+ impl->dst_buffer.resize_destructive(dst_copy_size);
if (src.linear == Fermi2D::MemoryLayout::BlockLinear) {
UnswizzleSubrect(impl->src_buffer, impl->tmp_buffer, src_bytes_per_pixel, src.width,
src.height, src.depth, config.src_x0, config.src_y0, src_extent_x,
@@ -218,7 +221,7 @@ bool SoftwareBlitEngine::Blit(Fermi2D::Surface& src, Fermi2D::Surface& dst,
}
const size_t dst_size = get_surface_size(dst, dst_bytes_per_pixel);
- impl->tmp_buffer.resize(dst_size);
+ impl->tmp_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(dst.Address(), impl->tmp_buffer.data(), dst_size);
if (dst.linear == Fermi2D::MemoryLayout::BlockLinear) {
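The switch to resize_destructive() matters because, unlike std::vector<T>::resize(), it neither preserves old contents nor value-initializes new elements; for write-only scratch buffers that initialization is wasted work. A minimal sketch of the idea, assuming those semantics for Common::ScratchBuffer:

    #include <cstddef>
    #include <memory>

    template <typename T>
    class ScratchBufferSketch {
    public:
        void resize_destructive(std::size_t size) {
            if (size > capacity_) {
                data_ = std::make_unique_for_overwrite<T[]>(size); // no zero-fill
                capacity_ = size;
            }
            size_ = size; // old contents are NOT preserved
        }
        T* data() noexcept { return data_.get(); }
        std::size_t size() const noexcept { return size_; }

    private:
        std::unique_ptr<T[]> data_;
        std::size_t size_ = 0;
        std::size_t capacity_ = 0;
    };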
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 3b2f6aab6..35d699bbf 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -59,6 +59,11 @@ public:
buffer_cache.AccumulateFlushes();
}
+ void SignalReference() {
+ std::function<void()> do_nothing([] {});
+ SignalFence(std::move(do_nothing));
+ }
+
void SyncOperation(std::function<void()>&& func) {
uncommitted_operations.emplace_back(std::move(func));
}
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 2e7f9c5ed..295a416a8 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -283,6 +283,21 @@ struct GPU::Impl {
gpu_thread.FlushRegion(addr, size);
}
+ VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) {
+ auto raster_area = rasterizer->GetFlushArea(addr, size);
+ if (raster_area.preemtive) {
+ return raster_area;
+ }
+ raster_area.preemtive = true;
+ const u64 fence = RequestSyncOperation([this, &raster_area]() {
+ rasterizer->FlushRegion(raster_area.start_address,
+ raster_area.end_address - raster_area.start_address);
+ });
+ gpu_thread.TickGPU();
+ WaitForSyncOperation(fence);
+ return raster_area;
+ }
+
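A hypothetical caller-side sketch (the memory accessor is an assumption; only GPU::OnCPURead comes from this change). By the time OnCPURead returns, [area.start_address, area.end_address) has been flushed to guest memory, either because the range was already preemptively downloaded or because the call ticked the GPU and waited on the sync operation:

    #include "common/common_types.h"
    #include "core/memory.h"
    #include "video_core/gpu.h"

    u8 ReadByteThroughGpuCaches(Tegra::GPU& gpu, Core::Memory::Memory& memory, VAddr addr) {
        [[maybe_unused]] const auto area = gpu.OnCPURead(addr, sizeof(u8));
        return memory.Read8(addr); // assumed accessor; the page is now flushed
    }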
/// Notify rasterizer that any caches of the specified region should be invalidated
void InvalidateRegion(VAddr addr, u64 size) {
gpu_thread.InvalidateRegion(addr, size);
@@ -538,6 +553,10 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
impl->SwapBuffers(framebuffer);
}
+VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) {
+ return impl->OnCPURead(addr, size);
+}
+
void GPU::FlushRegion(VAddr addr, u64 size) {
impl->FlushRegion(addr, size);
}
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 8a871593a..e49c40cf2 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -10,6 +10,7 @@
#include "core/hle/service/nvdrv/nvdata.h"
#include "video_core/cdma_pusher.h"
#include "video_core/framebuffer_config.h"
+#include "video_core/rasterizer_download_area.h"
namespace Core {
class System;
@@ -241,6 +242,9 @@ public:
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size);
+
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(VAddr addr, u64 size);
/// Notify rasterizer that any caches of the specified region should be invalidated
diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e87bd65fa..6ce179167 100644
--- a/src/video_core/host1x/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -111,7 +111,7 @@ const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegist
writer.WriteUe(0);
writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
- writer.WriteBit(false);
+ writer.WriteBit(context.h264_parameter_set.pic_order_present_flag != 0);
writer.WriteUe(0);
writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
@@ -129,7 +129,7 @@ const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegist
writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);
- writer.WriteBit(true);
+ writer.WriteBit(true); // pic_scaling_matrix_present_flag
for (s32 index = 0; index < 6; index++) {
writer.WriteBit(true);
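WriteUe() in this function emits unsigned Exp-Golomb codes (ue(v) in the H.264 spec), the variable-length encoding used throughout the composed PPS. A self-contained sketch of that encoding against an assumed WriteBit-style sink (BitSink is illustrative, not the writer used above):

    #include <cstdint>
    #include <vector>

    struct BitSink {
        std::vector<bool> bits;
        void WriteBit(bool b) { bits.push_back(b); }
    };

    void WriteUe(BitSink& sink, uint32_t value) {
        const uint64_t code = static_cast<uint64_t>(value) + 1;
        int top_bit = -1;
        for (uint64_t tmp = code; tmp != 0; tmp >>= 1) {
            ++top_bit; // index of the highest set bit of code
        }
        for (int i = 0; i < top_bit; ++i) {
            sink.WriteBit(false); // prefix: top_bit zero bits
        }
        for (int i = top_bit; i >= 0; --i) {
            sink.WriteBit(((code >> i) & 1) != 0); // code in top_bit + 1 bits
        }
    }
    // WriteUe(sink, 0) -> "1", WriteUe(sink, 1) -> "010", WriteUe(sink, 2) -> "011".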
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 941de95c1..1528cc1dd 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -255,7 +255,6 @@ private:
if (!in_range(query)) {
continue;
}
- rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
AsyncJobId async_job_id = query.GetAsyncJob();
auto flush_result = query.Flush(async);
if (async_job_id == NULL_ASYNC_JOB_ID) {
@@ -273,7 +272,6 @@ private:
/// Registers the passed parameters as cached and returns a pointer to the stored cached query.
CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
- rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
host_ptr);
diff --git a/src/video_core/rasterizer_download_area.h b/src/video_core/rasterizer_download_area.h
new file mode 100644
index 000000000..2d7425c79
--- /dev/null
+++ b/src/video_core/rasterizer_download_area.h
@@ -0,0 +1,16 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCore {
+
+struct RasterizerDownloadArea {
+ VAddr start_address;
+ VAddr end_address;
+ bool preemtive;
+};
+
+} // namespace VideoCore
\ No newline at end of file
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 33e2610bc..7566a8c4e 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -12,6 +12,7 @@
#include "video_core/cache_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
+#include "video_core/rasterizer_download_area.h"
namespace Tegra {
class MemoryManager;
@@ -95,6 +96,8 @@ public:
virtual bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
+ virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0;
+
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp
index 2b5c7defa..bf2ce4c49 100644
--- a/src/video_core/renderer_null/null_rasterizer.cpp
+++ b/src/video_core/renderer_null/null_rasterizer.cpp
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/alignment.h"
+#include "core/memory.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_null/null_rasterizer.h"
@@ -46,6 +48,14 @@ bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheTyp
}
void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
void RasterizerNull::OnCPUWrite(VAddr addr, u64 size) {}
+VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) {
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
void RasterizerNull::InvalidateGPUCache() {}
void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {}
void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {}
diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h
index 0c59e6a1f..a8d35d2c1 100644
--- a/src/video_core/renderer_null/null_rasterizer.h
+++ b/src/video_core/renderer_null/null_rasterizer.h
@@ -54,6 +54,7 @@ public:
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void OnCPUWrite(VAddr addr, u64 size) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 22ed16ebf..400c21981 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -108,7 +108,8 @@ bool IsASTCSupported() {
[[nodiscard]] bool IsDebugToolAttached(std::span<const std::string_view> extensions) {
const bool nsight = std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED");
- return nsight || HasExtension(extensions, "GL_EXT_debug_tool");
+ return nsight || HasExtension(extensions, "GL_EXT_debug_tool") ||
+ Settings::values.renderer_debug.GetValue();
}
} // Anonymous namespace
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 0089b4b27..f5baa0f3c 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -433,6 +433,29 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
return false;
}
+VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) {
+ {
+ std::scoped_lock lock{texture_cache.mutex};
+ auto area = texture_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ {
+ std::scoped_lock lock{buffer_cache.mutex};
+ auto area = buffer_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
+
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
if (addr == 0 || size == 0) {
@@ -1281,7 +1304,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const Tegra::DMA::BufferOperand& buffer_operand,
const Tegra::DMA::ImageOperand& image_operand) {
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- const auto image_id = texture_cache.DmaImageId(image_operand);
+ const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
if (image_id == VideoCommon::NULL_IMAGE_ID) {
return false;
}
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ad6978bd0..410d8ffc5 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -95,6 +95,7 @@ public:
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void OnCPUWrite(VAddr addr, u64 size) override;
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 47cccd0e5..31118886f 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -231,7 +231,7 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array<SwizzleSource, 4
[[nodiscard]] bool CanBeAccelerated(const TextureCacheRuntime& runtime,
const VideoCommon::ImageInfo& info) {
- if (IsPixelFormatASTC(info.format) && !runtime.HasNativeASTC()) {
+ if (IsPixelFormatASTC(info.format) && info.size.depth == 1 && !runtime.HasNativeASTC()) {
return Settings::values.accelerate_astc.GetValue() &&
!Settings::values.async_astc.GetValue();
}
@@ -1126,7 +1126,8 @@ bool Image::ScaleDown(bool ignore) {
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
ImageId image_id_, Image& image, const SlotVector<Image>&)
- : VideoCommon::ImageViewBase{info, image.info, image_id_}, views{runtime.null_image_views} {
+ : VideoCommon::ImageViewBase{info, image.info, image_id_, image.gpu_addr},
+ views{runtime.null_image_views} {
const Device& device = runtime.device;
if (True(image.flags & ImageFlagBits::Converted)) {
internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
@@ -1217,12 +1218,12 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
- : VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
+ : VideoCommon::ImageViewBase{info, view_info, gpu_addr_},
buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info)
- : VideoCommon::ImageViewBase{info, view_info} {}
+ : VideoCommon::ImageViewBase{info, view_info, 0} {}
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::NullImageViewParams& params)
: VideoCommon::ImageViewBase{params}, views{runtime.null_image_views} {}
@@ -1282,7 +1283,7 @@ GLuint ImageView::MakeView(Shader::TextureType view_type, GLenum view_format) {
ApplySwizzle(view.handle, format, casted_swizzle);
}
if (set_object_label) {
- const std::string name = VideoCommon::Name(*this);
+ const std::string name = VideoCommon::Name(*this, gpu_addr);
glObjectLabel(GL_TEXTURE, view.handle, static_cast<GLsizei>(name.size()), name.data());
}
return view.handle;
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 0dd039ed2..1190999a8 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -314,7 +314,6 @@ private:
std::unique_ptr<StorageViews> storage_views;
GLenum internal_format = GL_NONE;
GLuint default_handle = 0;
- GPUVAddr gpu_addr = 0;
u32 buffer_size = 0;
GLuint original_texture = 0;
int num_samples = 0;
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h
index 28b893e25..983e1c2e1 100644
--- a/src/video_core/renderer_vulkan/pipeline_helper.h
+++ b/src/video_core/renderer_vulkan/pipeline_helper.h
@@ -176,7 +176,7 @@ public:
};
inline void PushImageDescriptors(TextureCache& texture_cache,
- UpdateDescriptorQueue& update_descriptor_queue,
+ GuestDescriptorQueue& guest_descriptor_queue,
const Shader::Info& info, RescalingPushConstant& rescaling,
const VkSampler*& samplers,
const VideoCommon::ImageViewInOut*& views) {
@@ -190,7 +190,7 @@ inline void PushImageDescriptors(TextureCache& texture_cache,
const VkSampler sampler{*(samplers++)};
ImageView& image_view{texture_cache.GetImageView(image_view_id)};
const VkImageView vk_image_view{image_view.Handle(desc.type)};
- update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
+ guest_descriptor_queue.AddSampledImage(vk_image_view, sampler);
rescaling.PushTexture(texture_cache.IsRescaling(image_view));
}
}
@@ -201,7 +201,7 @@ inline void PushImageDescriptors(TextureCache& texture_cache,
texture_cache.MarkModification(image_view.image_id);
}
const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
- update_descriptor_queue.AddImage(vk_image_view);
+ guest_descriptor_queue.AddImage(vk_image_view);
rescaling.PushImage(texture_cache.IsRescaling(image_view));
}
}
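A hedged sketch of the rename's intent: the guest and compute-pass descriptor queues are presumably two instances of one queue type rather than new classes, e.g. via aliases like the following (assumed here; see vk_update_descriptor.h in this change for the actual definitions). Keeping compute-pass updates on a separate queue means they no longer interleave with guest draw descriptors:

    namespace Vulkan {
    class UpdateDescriptorQueue;
    using GuestDescriptorQueue = UpdateDescriptorQueue;
    using ComputePassDescriptorQueue = UpdateDescriptorQueue;
    } // namespace Vulkan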
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 510602e8e..9627eb129 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -298,12 +298,14 @@ private:
BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
Scheduler& scheduler_, StagingBufferPool& staging_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ GuestDescriptorQueue& guest_descriptor_queue_,
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue,
DescriptorPool& descriptor_pool)
: device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
- staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
- uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
- quad_index_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue) {
+ staging_pool{staging_pool_}, guest_descriptor_queue{guest_descriptor_queue_},
+ uint8_pass(device, scheduler, descriptor_pool, staging_pool, compute_pass_descriptor_queue),
+ quad_index_pass(device, scheduler, descriptor_pool, staging_pool,
+ compute_pass_descriptor_queue) {
quad_array_index_buffer = std::make_shared<QuadArrayIndexBuffer>(device_, memory_allocator_,
scheduler_, staging_pool_);
quad_strip_index_buffer = std::make_shared<QuadStripIndexBuffer>(device_, memory_allocator_,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 879f1ed94..5e9602905 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -63,7 +63,8 @@ class BufferCacheRuntime {
public:
explicit BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_manager_,
Scheduler& scheduler_, StagingBufferPool& staging_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ GuestDescriptorQueue& guest_descriptor_queue,
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue,
DescriptorPool& descriptor_pool);
void Finish();
@@ -116,12 +117,12 @@ public:
void BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
VideoCore::Surface::PixelFormat format) {
- update_descriptor_queue.AddTexelBuffer(buffer.View(offset, size, format));
+ guest_descriptor_queue.AddTexelBuffer(buffer.View(offset, size, format));
}
private:
void BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
- update_descriptor_queue.AddBuffer(buffer, offset, size);
+ guest_descriptor_queue.AddBuffer(buffer, offset, size);
}
void ReserveNullBuffer();
@@ -130,7 +131,7 @@ private:
MemoryAllocator& memory_allocator;
Scheduler& scheduler;
StagingBufferPool& staging_pool;
- UpdateDescriptorQueue& update_descriptor_queue;
+ GuestDescriptorQueue& guest_descriptor_queue;
std::shared_ptr<QuadArrayIndexBuffer> quad_array_index_buffer;
std::shared_ptr<QuadStripIndexBuffer> quad_strip_index_buffer;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 1a316b6eb..3bc8553e1 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -200,12 +200,12 @@ ComputePass::~ComputePass() = default;
Uint8Pass::Uint8Pass(const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool,
StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_)
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_)
: ComputePass(device_, descriptor_pool, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, {},
VULKAN_UINT8_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
- update_descriptor_queue{update_descriptor_queue_} {}
+ compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}
Uint8Pass::~Uint8Pass() = default;
@@ -214,10 +214,10 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
const auto staging = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
- update_descriptor_queue.Acquire();
- update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
- update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
- const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ compute_pass_descriptor_queue.Acquire();
+ compute_pass_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
+ compute_pass_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
+ const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([this, descriptor_data, num_vertices](vk::CommandBuffer cmdbuf) {
@@ -242,12 +242,12 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
QuadIndexedPass::QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_)
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_)
: ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 3>, VULKAN_QUAD_INDEXED_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
- update_descriptor_queue{update_descriptor_queue_} {}
+ compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}
QuadIndexedPass::~QuadIndexedPass() = default;
@@ -272,10 +272,10 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
const std::size_t staging_size = num_tri_vertices * sizeof(u32);
const auto staging = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
- update_descriptor_queue.Acquire();
- update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
- update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
- const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ compute_pass_descriptor_queue.Acquire();
+ compute_pass_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
+ compute_pass_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
+ const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([this, descriptor_data, num_tri_vertices, base_vertex, index_shift,
@@ -304,13 +304,14 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_,
MemoryAllocator& memory_allocator_)
: ComputePass(device_, descriptor_pool_, ASTC_DESCRIPTOR_SET_BINDINGS,
ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY, ASTC_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(AstcPushConstants)>, ASTC_DECODER_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
- update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
+ compute_pass_descriptor_queue{compute_pass_descriptor_queue_}, memory_allocator{
+ memory_allocator_} {}
ASTCDecoderPass::~ASTCDecoderPass() = default;
@@ -358,11 +359,11 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
const u32 num_dispatches_y = Common::DivCeil(swizzle.num_tiles.height, 8U);
const u32 num_dispatches_z = image.info.resources.layers;
- update_descriptor_queue.Acquire();
- update_descriptor_queue.AddBuffer(map.buffer, input_offset,
- image.guest_size_bytes - swizzle.buffer_offset);
- update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
- const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ compute_pass_descriptor_queue.Acquire();
+ compute_pass_descriptor_queue.AddBuffer(map.buffer, input_offset,
+ image.guest_size_bytes - swizzle.buffer_offset);
+ compute_pass_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
+ const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
// To unswizzle the ASTC data
const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index c4c8fa081..dd3927376 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -9,6 +9,7 @@
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -21,7 +22,6 @@ namespace Vulkan {
class Device;
class StagingBufferPool;
class Scheduler;
-class UpdateDescriptorQueue;
class Image;
struct StagingBufferRef;
@@ -50,7 +50,7 @@ class Uint8Pass final : public ComputePass {
public:
explicit Uint8Pass(const Device& device_, Scheduler& scheduler_,
DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_);
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_);
~Uint8Pass();
/// Assemble uint8 indices into an uint16 index buffer
@@ -61,7 +61,7 @@ public:
private:
Scheduler& scheduler;
StagingBufferPool& staging_buffer_pool;
- UpdateDescriptorQueue& update_descriptor_queue;
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue;
};
class QuadIndexedPass final : public ComputePass {
@@ -69,7 +69,7 @@ public:
explicit QuadIndexedPass(const Device& device_, Scheduler& scheduler_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_);
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_);
~QuadIndexedPass();
std::pair<VkBuffer, VkDeviceSize> Assemble(
@@ -79,7 +79,7 @@ public:
private:
Scheduler& scheduler;
StagingBufferPool& staging_buffer_pool;
- UpdateDescriptorQueue& update_descriptor_queue;
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue;
};
class ASTCDecoderPass final : public ComputePass {
@@ -87,7 +87,7 @@ public:
explicit ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue_,
MemoryAllocator& memory_allocator_);
~ASTCDecoderPass();
@@ -97,7 +97,7 @@ public:
private:
Scheduler& scheduler;
StagingBufferPool& staging_buffer_pool;
- UpdateDescriptorQueue& update_descriptor_queue;
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue;
MemoryAllocator& memory_allocator;
};
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 2a0f0dbf0..733e70d9d 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -26,13 +26,13 @@ using Tegra::Texture::TexturePair;
ComputePipeline::ComputePipeline(const Device& device_, vk::PipelineCache& pipeline_cache_,
DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ GuestDescriptorQueue& guest_descriptor_queue_,
Common::ThreadWorker* thread_worker,
PipelineStatistics* pipeline_statistics,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
vk::ShaderModule spv_module_)
- : device{device_}, pipeline_cache(pipeline_cache_),
- update_descriptor_queue{update_descriptor_queue_}, info{info_},
+ : device{device_},
+ pipeline_cache(pipeline_cache_), guest_descriptor_queue{guest_descriptor_queue_}, info{info_},
spv_module(std::move(spv_module_)) {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
@@ -99,7 +99,7 @@ ComputePipeline::ComputePipeline(const Device& device_, vk::PipelineCache& pipel
void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
Tegra::MemoryManager& gpu_memory, Scheduler& scheduler,
BufferCache& buffer_cache, TextureCache& texture_cache) {
- update_descriptor_queue.Acquire();
+ guest_descriptor_queue.Acquire();
buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
buffer_cache.UnbindComputeStorageBuffers();
@@ -194,7 +194,7 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
RescalingPushConstant rescaling;
const VkSampler* samplers_it{samplers.data()};
const VideoCommon::ImageViewInOut* views_it{views.data()};
- PushImageDescriptors(texture_cache, update_descriptor_queue, info, rescaling, samplers_it,
+ PushImageDescriptors(texture_cache, guest_descriptor_queue, info, rescaling, samplers_it,
views_it);
if (!is_built.load(std::memory_order::relaxed)) {
@@ -204,7 +204,7 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
});
}
- const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ const void* const descriptor_data{guest_descriptor_queue.UpdateData()};
const bool is_rescaling = !info.texture_descriptors.empty() || !info.image_descriptors.empty();
scheduler.Record([this, descriptor_data, is_rescaling,
rescaling_data = rescaling.Data()](vk::CommandBuffer cmdbuf) {
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 78d77027f..d1a1e2c46 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -30,7 +30,7 @@ class ComputePipeline {
public:
explicit ComputePipeline(const Device& device, vk::PipelineCache& pipeline_cache,
DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue,
+ GuestDescriptorQueue& guest_descriptor_queue,
Common::ThreadWorker* thread_worker,
PipelineStatistics* pipeline_statistics,
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info,
@@ -48,7 +48,7 @@ public:
private:
const Device& device;
vk::PipelineCache& pipeline_cache;
- UpdateDescriptorQueue& update_descriptor_queue;
+ GuestDescriptorQueue& guest_descriptor_queue;
Shader::Info info;
VideoCommon::ComputeUniformBufferSizes uniform_buffer_sizes{};
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index baedc4424..f1bcd5cd6 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -236,13 +236,13 @@ GraphicsPipeline::GraphicsPipeline(
Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
vk::PipelineCache& pipeline_cache_, VideoCore::ShaderNotify* shader_notify,
const Device& device_, DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
+ GuestDescriptorQueue& guest_descriptor_queue_, Common::ThreadWorker* worker_thread,
PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos)
: key{key_}, device{device_}, texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
pipeline_cache(pipeline_cache_), scheduler{scheduler_},
- update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
+ guest_descriptor_queue{guest_descriptor_queue_}, spv_modules{std::move(stages)} {
if (shader_notify) {
shader_notify->MarkShaderBuilding();
}
@@ -449,7 +449,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
buffer_cache.UpdateGraphicsBuffers(is_indexed);
buffer_cache.BindHostGeometryBuffers(is_indexed);
- update_descriptor_queue.Acquire();
+ guest_descriptor_queue.Acquire();
RescalingPushConstant rescaling;
RenderAreaPushConstant render_area;
@@ -457,7 +457,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
const VideoCommon::ImageViewInOut* views_it{views.data()};
const auto prepare_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
buffer_cache.BindHostStageBuffers(stage);
- PushImageDescriptors(texture_cache, update_descriptor_queue, stage_infos[stage], rescaling,
+ PushImageDescriptors(texture_cache, guest_descriptor_queue, stage_infos[stage], rescaling,
samplers_it, views_it);
const auto& info{stage_infos[0]};
if (info.uses_render_area) {
@@ -499,7 +499,7 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling,
const bool is_rescaling{texture_cache.IsRescaling()};
const bool update_rescaling{scheduler.UpdateRescaling(is_rescaling)};
const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)};
- const void* const descriptor_data{update_descriptor_queue.UpdateData()};
+ const void* const descriptor_data{guest_descriptor_queue.UpdateData()};
scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(),
is_rescaling, update_rescaling,
uses_render_area = render_area.uses_render_area,
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 67c657d0e..99e56e9ad 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -64,7 +64,6 @@ class RenderPassCache;
class RescalingPushConstant;
class RenderAreaPushConstant;
class Scheduler;
-class UpdateDescriptorQueue;
class GraphicsPipeline {
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
@@ -74,7 +73,7 @@ public:
Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
vk::PipelineCache& pipeline_cache, VideoCore::ShaderNotify* shader_notify,
const Device& device, DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue, Common::ThreadWorker* worker_thread,
+ GuestDescriptorQueue& guest_descriptor_queue, Common::ThreadWorker* worker_thread,
PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos);
@@ -133,7 +132,7 @@ private:
BufferCache& buffer_cache;
vk::PipelineCache& pipeline_cache;
Scheduler& scheduler;
- UpdateDescriptorQueue& update_descriptor_queue;
+ GuestDescriptorQueue& guest_descriptor_queue;
void (*configure_func)(GraphicsPipeline*, bool){};
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index e5219e7e0..66dfe5733 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -277,11 +277,11 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
- UpdateDescriptorQueue& update_descriptor_queue_,
+ GuestDescriptorQueue& guest_descriptor_queue_,
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
: VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
- descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_},
+ descriptor_pool{descriptor_pool_}, guest_descriptor_queue{guest_descriptor_queue_},
render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
texture_cache{texture_cache_}, shader_notify{shader_notify_},
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
@@ -643,7 +643,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
return std::make_unique<GraphicsPipeline>(
scheduler, buffer_cache, texture_cache, vulkan_pipeline_cache, &shader_notify, device,
- descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key,
+ descriptor_pool, guest_descriptor_queue, thread_worker, statistics, render_pass_cache, key,
std::move(modules), infos);
} catch (const Shader::Exception& exception) {
@@ -723,7 +723,7 @@ std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
}
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
return std::make_unique<ComputePipeline>(device, vulkan_pipeline_cache, descriptor_pool,
- update_descriptor_queue, thread_worker, statistics,
+ guest_descriptor_queue, thread_worker, statistics,
&shader_notify, program.info, std::move(spv_module));
} catch (const Shader::Exception& exception) {
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 5171912d7..15aa7e224 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -82,7 +82,6 @@ class PipelineStatistics;
class RasterizerVulkan;
class RenderPassCache;
class Scheduler;
-class UpdateDescriptorQueue;
using VideoCommon::ShaderInfo;
@@ -102,7 +101,7 @@ class PipelineCache : public VideoCommon::ShaderCache {
public:
explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue,
+ GuestDescriptorQueue& guest_descriptor_queue,
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
~PipelineCache();
@@ -144,7 +143,7 @@ private:
const Device& device;
Scheduler& scheduler;
DescriptorPool& descriptor_pool;
- UpdateDescriptorQueue& update_descriptor_queue;
+ GuestDescriptorQueue& guest_descriptor_queue;
RenderPassCache& render_pass_cache;
BufferCache& buffer_cache;
TextureCache& texture_cache;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index d1489fc95..64bd2f6a5 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -160,17 +160,16 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
: RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
- update_descriptor_queue(device, scheduler),
- blit_image(device, scheduler, state_tracker, descriptor_pool),
- render_pass_cache(device), texture_cache_runtime{device, scheduler,
- memory_allocator, staging_pool,
- blit_image, render_pass_cache,
- descriptor_pool, update_descriptor_queue},
+ guest_descriptor_queue(device, scheduler), compute_pass_descriptor_queue(device, scheduler),
+ blit_image(device, scheduler, state_tracker, descriptor_pool), render_pass_cache(device),
+ texture_cache_runtime{
+ device, scheduler, memory_allocator, staging_pool,
+ blit_image, render_pass_cache, descriptor_pool, compute_pass_descriptor_queue},
texture_cache(texture_cache_runtime, *this),
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
- update_descriptor_queue, descriptor_pool),
+ guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool),
buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
- pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
+ pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue,
render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
query_cache{*this, cpu_memory_, device, scheduler},
accelerate_dma(buffer_cache, texture_cache, scheduler),
@@ -502,6 +501,22 @@ bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
return false;
}
+VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 size) {
+ {
+ std::scoped_lock lock{texture_cache.mutex};
+ auto area = texture_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
+
void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
if (addr == 0 || size == 0) {
return;
@@ -598,7 +613,7 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) {
}
void RasterizerVulkan::SignalReference() {
- fence_manager.SignalOrdering();
+ fence_manager.SignalReference();
}
void RasterizerVulkan::ReleaseFences() {
@@ -631,7 +646,7 @@ void RasterizerVulkan::WaitForIdle() {
cmdbuf.SetEvent(event, flags);
cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
});
- SignalReference();
+ fence_manager.SignalOrdering();
}
void RasterizerVulkan::FragmentBarrier() {
@@ -653,7 +668,8 @@ void RasterizerVulkan::FlushCommands() {
void RasterizerVulkan::TickFrame() {
draw_counter = 0;
- update_descriptor_queue.TickFrame();
+ guest_descriptor_queue.TickFrame();
+ compute_pass_descriptor_queue.TickFrame();
fence_manager.TickFrame();
staging_pool.TickFrame();
{
@@ -777,7 +793,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const Tegra::DMA::BufferOperand& buffer_operand,
const Tegra::DMA::ImageOperand& image_operand) {
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- const auto image_id = texture_cache.DmaImageId(image_operand);
+ const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
if (image_id == VideoCommon::NULL_IMAGE_ID) {
return false;
}
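
When the texture cache reports nothing for the range, GetFlushArea falls back to a page-aligned window around the request and flags it preemtive. A worked example, assuming the usual 4 KiB guest page (Core::Memory::YUZU_PAGESIZE == 0x1000):

    // addr = 0x12345678, size = 0x100
    // start_address = AlignDown(0x12345678, 0x1000) -> 0x12345000
    // end_address   = AlignUp(0x12345778, 0x1000)   -> 0x12346000
    // preemtive = true, so the caller may defer the actual download.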
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 1659fbc13..b39710b3c 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -92,6 +92,7 @@ public:
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) override;
@@ -183,7 +184,8 @@ private:
StagingBufferPool staging_pool;
DescriptorPool descriptor_pool;
- UpdateDescriptorQueue update_descriptor_queue;
+ GuestDescriptorQueue guest_descriptor_queue;
+ ComputePassDescriptorQueue compute_pass_descriptor_queue;
BlitImageHelper blit_image;
RenderPassCache render_pass_cache;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index d0a7d8f35..4d0481f2a 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -798,13 +798,13 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, Scheduler& sched
BlitImageHelper& blit_image_helper_,
RenderPassCache& render_pass_cache_,
DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue)
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue)
: device{device_}, scheduler{scheduler_}, memory_allocator{memory_allocator_},
staging_buffer_pool{staging_buffer_pool_}, blit_image_helper{blit_image_helper_},
render_pass_cache{render_pass_cache_}, resolution{Settings::values.resolution_info} {
if (Settings::values.accelerate_astc) {
astc_decoder_pass.emplace(device, scheduler, descriptor_pool, staging_buffer_pool,
- update_descriptor_queue, memory_allocator);
+ compute_pass_descriptor_queue, memory_allocator);
}
}
@@ -1268,7 +1268,7 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
if (Settings::values.async_astc.GetValue()) {
flags |= VideoCommon::ImageFlagBits::AsynchronousDecode;
- } else if (Settings::values.accelerate_astc.GetValue()) {
+ } else if (Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
}
flags |= VideoCommon::ImageFlagBits::Converted;
@@ -1584,8 +1584,9 @@ bool Image::NeedsScaleHelper() const {
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
ImageId image_id_, Image& image)
- : VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
- image_handle{image.Handle()}, samples(ConvertSampleCount(image.info.num_samples)) {
+ : VideoCommon::ImageViewBase{info, image.info, image_id_, image.gpu_addr},
+ device{&runtime.device}, image_handle{image.Handle()},
+ samples(ConvertSampleCount(image.info.num_samples)) {
using Shader::TextureType;
const VkImageAspectFlags aspect_mask = ImageViewAspectMask(info);
@@ -1631,7 +1632,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
}
vk::ImageView handle = device->GetLogical().CreateImageView(ci);
if (device->HasDebuggingToolAttached()) {
- handle.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
+ handle.SetObjectNameEXT(VideoCommon::Name(*this, gpu_addr).c_str());
}
image_views[static_cast<size_t>(tex_type)] = std::move(handle);
};
@@ -1672,7 +1673,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
- : VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
+ : VideoCommon::ImageViewBase{info, view_info, gpu_addr_},
buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}
ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::NullImageViewParams& params)
@@ -1863,6 +1864,7 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
num_layers = std::max(num_layers, color_buffer->range.extent.layers);
images[num_images] = color_buffer->ImageHandle();
image_ranges[num_images] = MakeSubresourceRange(color_buffer);
+ rt_map[index] = num_images;
samples = color_buffer->Samples();
++num_images;
}
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index c656c5386..4166b3d20 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -34,7 +34,6 @@ class ImageView;
class Framebuffer;
class RenderPassCache;
class StagingBufferPool;
-class UpdateDescriptorQueue;
class Scheduler;
class TextureCacheRuntime {
@@ -45,7 +44,7 @@ public:
BlitImageHelper& blit_image_helper_,
RenderPassCache& render_pass_cache_,
DescriptorPool& descriptor_pool,
- UpdateDescriptorQueue& update_descriptor_queue);
+ ComputePassDescriptorQueue& compute_pass_descriptor_queue);
void Finish();
@@ -265,7 +264,6 @@ private:
VkImage image_handle = VK_NULL_HANDLE;
VkImageView render_target = VK_NULL_HANDLE;
VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
- GPUVAddr gpu_addr = 0;
u32 buffer_size = 0;
};
@@ -336,7 +334,7 @@ public:
}
[[nodiscard]] bool HasAspectColorBit(size_t index) const noexcept {
- return (image_ranges.at(index).aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0;
+ return (image_ranges.at(rt_map[index]).aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0;
}
[[nodiscard]] bool HasAspectDepthBit() const noexcept {
@@ -356,6 +354,7 @@ private:
u32 num_images = 0;
std::array<VkImage, 9> images{};
std::array<VkImageSubresourceRange, 9> image_ranges{};
+ std::array<size_t, NUM_RT> rt_map{};
bool has_depth{};
bool has_stencil{};
};
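
images and image_ranges are filled densely as attachments are found, while HasAspectColorBit is queried by render-target slot, so the two indexings drift apart as soon as a slot is unbound; rt_map records where each slot landed. A worked example, assuming slot 0 is unbound and slot 1 holds the only color buffer:

    // CreateFramebuffer, index == 1, num_images == 0:
    //   images[0]       = color_buffer->ImageHandle();
    //   image_ranges[0] = MakeSubresourceRange(color_buffer);
    //   rt_map[1]       = 0;   // slot 1 -> packed entry 0
    // HasAspectColorBit(1) now reads image_ranges[rt_map[1]], i.e. entry 0,
    // instead of the unrelated image_ranges[1].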
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 1c1a7020b..310fb551a 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -32,7 +32,7 @@ class UpdateDescriptorQueue final {
// This should be plenty for the vast majority of cases. Most desktop platforms only
// provide up to 3 swapchain images.
static constexpr size_t FRAMES_IN_FLIGHT = 5;
- static constexpr size_t FRAME_PAYLOAD_SIZE = 0x10000;
+ static constexpr size_t FRAME_PAYLOAD_SIZE = 0x20000;
static constexpr size_t PAYLOAD_SIZE = FRAME_PAYLOAD_SIZE * FRAMES_IN_FLIGHT;
public:
@@ -86,4 +86,8 @@ private:
std::array<DescriptorUpdateEntry, PAYLOAD_SIZE> payload;
};
+// TODO: should these be separate classes instead?
+using GuestDescriptorQueue = UpdateDescriptorQueue;
+using ComputePassDescriptorQueue = UpdateDescriptorQueue;
+
} // namespace Vulkan
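
Both aliases resolve to the same UpdateDescriptorQueue for now, so the change is purely a split by role, backed by the doubled FRAME_PAYLOAD_SIZE above. If the TODO is ever taken up, only the alias targets need to change; call sites already name the role they use:

    // Hypothetical follow-up sketched from the TODO, not part of this change:
    // class GuestDescriptorQueue : public UpdateDescriptorQueue { /* ... */ };
    // class ComputePassDescriptorQueue : public UpdateDescriptorQueue { /* ... */ };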
diff --git a/src/video_core/texture_cache/formatter.cpp b/src/video_core/texture_cache/formatter.cpp
index 30f72361d..6279d8e9e 100644
--- a/src/video_core/texture_cache/formatter.cpp
+++ b/src/video_core/texture_cache/formatter.cpp
@@ -46,7 +46,7 @@ std::string Name(const ImageBase& image) {
return "Invalid";
}
-std::string Name(const ImageViewBase& image_view) {
+std::string Name(const ImageViewBase& image_view, GPUVAddr addr) {
const u32 width = image_view.size.width;
const u32 height = image_view.size.height;
const u32 depth = image_view.size.depth;
@@ -56,23 +56,25 @@ std::string Name(const ImageViewBase& image_view) {
const std::string level = num_levels > 1 ? fmt::format(":{}", num_levels) : "";
switch (image_view.type) {
case ImageViewType::e1D:
- return fmt::format("ImageView 1D {}{}", width, level);
+ return fmt::format("ImageView 1D 0x{:X} {}{}", addr, width, level);
case ImageViewType::e2D:
- return fmt::format("ImageView 2D {}x{}{}", width, height, level);
+ return fmt::format("ImageView 2D 0x{:X} {}x{}{}", addr, width, height, level);
case ImageViewType::Cube:
- return fmt::format("ImageView Cube {}x{}{}", width, height, level);
+ return fmt::format("ImageView Cube 0x{:X} {}x{}{}", addr, width, height, level);
case ImageViewType::e3D:
- return fmt::format("ImageView 3D {}x{}x{}{}", width, height, depth, level);
+ return fmt::format("ImageView 3D 0x{:X} {}x{}x{}{}", addr, width, height, depth, level);
case ImageViewType::e1DArray:
- return fmt::format("ImageView 1DArray {}{}|{}", width, level, num_layers);
+ return fmt::format("ImageView 1DArray 0x{:X} {}{}|{}", addr, width, level, num_layers);
case ImageViewType::e2DArray:
- return fmt::format("ImageView 2DArray {}x{}{}|{}", width, height, level, num_layers);
+ return fmt::format("ImageView 2DArray 0x{:X} {}x{}{}|{}", addr, width, height, level,
+ num_layers);
case ImageViewType::CubeArray:
- return fmt::format("ImageView CubeArray {}x{}{}|{}", width, height, level, num_layers);
+ return fmt::format("ImageView CubeArray 0x{:X} {}x{}{}|{}", addr, width, height, level,
+ num_layers);
case ImageViewType::Rect:
- return fmt::format("ImageView Rect {}x{}{}", width, height, level);
+ return fmt::format("ImageView Rect 0x{:X} {}x{}{}", addr, width, height, level);
case ImageViewType::Buffer:
- return fmt::format("BufferView {}", width);
+ return fmt::format("BufferView 0x{:X} {}", addr, width);
}
return "Invalid";
}
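
With the GPU address folded into the name, two views with identical dimensions can finally be told apart in a capture. Sample output, with a hypothetical address:

    // Name(view, 0x12340000) for a 1280x720 2D view with 3 mip levels:
    //   "ImageView 2D 0x12340000 1280x720:3"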
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h
index b97147797..9ee57a076 100644
--- a/src/video_core/texture_cache/formatter.h
+++ b/src/video_core/texture_cache/formatter.h
@@ -274,7 +274,7 @@ struct RenderTargets;
[[nodiscard]] std::string Name(const ImageBase& image);
-[[nodiscard]] std::string Name(const ImageViewBase& image_view);
+[[nodiscard]] std::string Name(const ImageViewBase& image_view, GPUVAddr addr);
[[nodiscard]] std::string Name(const RenderTargets& render_targets);
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index 11f3f78a1..e8ddde691 100644
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -4,6 +4,7 @@
#include <fmt/format.h>
#include "common/assert.h"
+#include "common/settings.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/format_lookup_table.h"
#include "video_core/texture_cache/image_info.h"
@@ -22,6 +23,8 @@ using VideoCore::Surface::PixelFormat;
using VideoCore::Surface::SurfaceType;
ImageInfo::ImageInfo(const TICEntry& config) noexcept {
+ forced_flushed = config.IsPitchLinear() && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = PixelFormatFromTextureInfo(config.format, config.r_type, config.g_type, config.b_type,
config.a_type, config.srgb_conversion);
num_samples = NumSamples(config.msaa_mode);
@@ -117,6 +120,9 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
Tegra::Texture::MsaaMode msaa_mode) noexcept {
+ forced_flushed =
+ ct.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(ct.format);
rescaleable = false;
if (ct.tile_mode.is_pitch_linear) {
@@ -155,6 +161,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::ZetaSize& zt_size,
Tegra::Texture::MsaaMode msaa_mode) noexcept {
+ forced_flushed =
+ zt.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromDepthFormat(zt.format);
size.width = zt_size.width;
size.height = zt_size.height;
@@ -195,6 +204,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::Zet
ImageInfo::ImageInfo(const Fermi2D::Surface& config) noexcept {
UNIMPLEMENTED_IF_MSG(config.layer != 0, "Surface layer is not zero");
+ forced_flushed = config.linear == Fermi2D::MemoryLayout::Pitch &&
+ !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(config.format);
rescaleable = false;
if (config.linear == Fermi2D::MemoryLayout::Pitch) {
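
All four constructors apply the same rule: a pitch-linear surface is marked forced_flushed (and, initially, dma_downloaded) only when reactive flushing is disabled, because reactive flushing can pick such surfaces up on demand. The predicate reduces to:

    // forced_flushed = is_pitch_linear && !use_reactive_flushing;
    // dma_downloaded = forced_flushed;
    //   block-linear surface        -> never forced
    //   pitch-linear, reactive on   -> flushed reactively instead
    //   pitch-linear, reactive off  -> forced, matching the old behavior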
diff --git a/src/video_core/texture_cache/image_info.h b/src/video_core/texture_cache/image_info.h
index 4b7dfa315..8a4cb0cbd 100644
--- a/src/video_core/texture_cache/image_info.h
+++ b/src/video_core/texture_cache/image_info.h
@@ -39,6 +39,8 @@ struct ImageInfo {
u32 tile_width_spacing = 0;
bool rescaleable = false;
bool downscaleable = false;
+ bool forced_flushed = false;
+ bool dma_downloaded = false;
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp
index 04fb84bfa..d134b6738 100644
--- a/src/video_core/texture_cache/image_view_base.cpp
+++ b/src/video_core/texture_cache/image_view_base.cpp
@@ -4,7 +4,6 @@
#include <algorithm>
#include "common/assert.h"
-#include "common/settings.h"
#include "video_core/compatible_formats.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/formatter.h"
@@ -16,8 +15,8 @@
namespace VideoCommon {
ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_info,
- ImageId image_id_)
- : image_id{image_id_}, format{info.format}, type{info.type}, range{info.range},
+ ImageId image_id_, GPUVAddr addr)
+ : image_id{image_id_}, gpu_addr{addr}, format{info.format}, type{info.type}, range{info.range},
size{
.width = std::max(image_info.size.width >> range.base.level, 1u),
.height = std::max(image_info.size.height >> range.base.level, 1u),
@@ -26,8 +25,7 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,
image_info.format);
- const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
- if (image_info.type == ImageType::Linear && is_async) {
+ if (image_info.forced_flushed) {
flags |= ImageViewFlagBits::PreemtiveDownload;
}
if (image_info.type == ImageType::e3D && info.type != ImageViewType::e3D) {
@@ -35,8 +33,8 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
}
}
-ImageViewBase::ImageViewBase(const ImageInfo& info, const ImageViewInfo& view_info)
- : image_id{NULL_IMAGE_ID}, format{info.format}, type{ImageViewType::Buffer},
+ImageViewBase::ImageViewBase(const ImageInfo& info, const ImageViewInfo& view_info, GPUVAddr addr)
+ : image_id{NULL_IMAGE_ID}, gpu_addr{addr}, format{info.format}, type{ImageViewType::Buffer},
size{
.width = info.size.width,
.height = 1,
diff --git a/src/video_core/texture_cache/image_view_base.h b/src/video_core/texture_cache/image_view_base.h
index 69c9776e7..a25ae1d4a 100644
--- a/src/video_core/texture_cache/image_view_base.h
+++ b/src/video_core/texture_cache/image_view_base.h
@@ -24,9 +24,9 @@ enum class ImageViewFlagBits : u16 {
DECLARE_ENUM_FLAG_OPERATORS(ImageViewFlagBits)
struct ImageViewBase {
- explicit ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_info,
- ImageId image_id);
- explicit ImageViewBase(const ImageInfo& info, const ImageViewInfo& view_info);
+ explicit ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_info, ImageId image_id,
+ GPUVAddr addr);
+ explicit ImageViewBase(const ImageInfo& info, const ImageViewInfo& view_info, GPUVAddr addr);
explicit ImageViewBase(const NullImageViewParams&);
[[nodiscard]] bool IsBuffer() const noexcept {
@@ -34,6 +34,7 @@ struct ImageViewBase {
}
ImageId image_id{};
+ GPUVAddr gpu_addr = 0;
PixelFormat format{};
ImageViewType type{};
SubresourceRange range;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index f335009d0..b24086fce 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -491,6 +491,32 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
}
template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ ForEachImageInRegion(cpu_addr, size, [&](ImageId, ImageBase& image) {
+ if (False(image.flags & ImageFlagBits::GpuModified)) {
+ return;
+ }
+ if (!area) {
+ area.emplace();
+ area->start_address = cpu_addr;
+ area->end_address = cpu_addr + size;
+ area->preemtive = true;
+ }
+ area->start_address = std::min(area->start_address, image.cpu_addr);
+ area->end_address = std::max(area->end_address, image.cpu_addr_end);
+ for (auto image_view_id : image.image_view_ids) {
+ auto& image_view = slot_image_views[image_view_id];
+ image_view.flags |= ImageViewFlagBits::PreemtiveDownload;
+ }
+ area->preemtive &= image.info.forced_flushed;
+ image.info.forced_flushed = true;
+ });
+ return area;
+}
+
+template <class P>
void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
std::vector<ImageId> deleted_images;
ForEachImageInRegion(cpu_addr, size, [&](ImageId id, Image&) { deleted_images.push_back(id); });
@@ -683,6 +709,7 @@ void TextureCache<P>::CommitAsyncFlushes() {
download_info.async_buffer_id = last_async_buffer_id;
}
}
+
if (any_none_dma) {
auto download_map = runtime.DownloadStagingBuffer(total_size_bytes, true);
for (const PendingDownload& download_info : download_ids) {
@@ -695,6 +722,7 @@ void TextureCache<P>::CommitAsyncFlushes() {
}
uncommitted_async_buffers.emplace_back(download_map);
}
+
async_buffers.emplace_back(std::move(uncommitted_async_buffers));
uncommitted_async_buffers.clear();
}
@@ -783,17 +811,22 @@ void TextureCache<P>::PopAsyncFlushes() {
}
template <class P>
-ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
const ImageInfo dst_info(operand);
const ImageId dst_id = FindDMAImage(dst_info, operand.address);
if (!dst_id) {
return NULL_IMAGE_ID;
}
- const auto& image = slot_images[dst_id];
+ auto& image = slot_images[dst_id];
if (False(image.flags & ImageFlagBits::GpuModified)) {
        // No need to waste time on an image that's synced with the guest
return NULL_IMAGE_ID;
}
+ if (!is_upload && !image.info.dma_downloaded) {
+ // Force a full sync.
+ image.info.dma_downloaded = true;
+ return NULL_IMAGE_ID;
+ }
const auto base = image.TryFindBase(operand.address);
if (!base) {
return NULL_IMAGE_ID;
@@ -1290,7 +1323,6 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
all_siblings.push_back(overlap_id);
} else {
bad_overlap_ids.push_back(overlap_id);
- overlap.flags |= ImageFlagBits::BadOverlap;
}
};
ForEachImageInRegion(cpu_addr, size_bytes, region_check);
@@ -1359,6 +1391,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
ScaleDown(new_image);
}
+ std::ranges::sort(overlap_ids, [this](const ImageId lhs, const ImageId rhs) {
+ const ImageBase& lhs_image = slot_images[lhs];
+ const ImageBase& rhs_image = slot_images[rhs];
+ return lhs_image.modification_tick < rhs_image.modification_tick;
+ });
+
for (const ImageId overlap_id : overlap_ids) {
Image& overlap = slot_images[overlap_id];
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
@@ -1395,7 +1433,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
ImageBase& aliased = slot_images[aliased_id];
aliased.overlapping_images.push_back(new_image_id);
new_image.overlapping_images.push_back(aliased_id);
- new_image.flags |= ImageFlagBits::BadOverlap;
+ if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
+ aliased.flags |= ImageFlagBits::BadOverlap;
+ }
+ if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
+ new_image.flags |= ImageFlagBits::BadOverlap;
+ }
}
RegisterImage(new_image_id);
return new_image_id;
@@ -1426,7 +1469,7 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
if (!copy.must_accelerate) {
do {
if (!src_id && !dst_id) {
- return std::nullopt;
+ break;
}
if (src_id && True(slot_images[src_id].flags & ImageFlagBits::GpuModified)) {
break;
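
TextureCache::GetFlushArea widens the requested window to cover every GPU-modified image it intersects, and the result stays preemtive only while every such image was already forced_flushed. A sketch of the expansion, with hypothetical addresses:

    // Query: [0x1000, 0x2000)
    // Image A (GpuModified): cpu_addr = 0x0800, cpu_addr_end = 0x1800
    // Image B (GpuModified): cpu_addr = 0x1c00, cpu_addr_end = 0x2400
    // Result: [0x0800, 0x2400),
    //         preemtive == A.forced_flushed && B.forced_flushed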
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 758b7e212..0720494e5 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -179,6 +179,8 @@ public:
/// Download contents of host images to guest memory in a region
void DownloadMemory(VAddr cpu_addr, size_t size);
+ std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
/// Remove images in a region
void UnmapMemory(VAddr cpu_addr, size_t size);
@@ -205,7 +207,7 @@ public:
/// Pop asynchronous downloads
void PopAsyncFlushes();
- [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+ [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload);
[[nodiscard]] std::pair<Image*, BufferImageCopy> DmaBufferImageCopy(
const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
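
The new is_upload flag makes the first DMA download from a GPU-modified image deliberately miss: DmaImageId sets dma_downloaded and returns NULL_IMAGE_ID so the caller falls back to the fully synchronized path once, after which downloads of that image may be accelerated. Caller-side view, per the vk_rasterizer.cpp hunk above:

    // upload:   DmaImageId(operand, true)   -> may accelerate immediately
    // download: DmaImageId(operand, false)  -> NULL_IMAGE_ID until the image
    //           has gone through one full sync (dma_downloaded set)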
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index de37db684..f1071aa23 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -896,11 +896,11 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
ASSERT(copy.buffer_row_length == Common::AlignUp(mip_size.width, tile_size.width));
ASSERT(copy.buffer_image_height == Common::AlignUp(mip_size.height, tile_size.height));
if (IsPixelFormatASTC(info.format)) {
- ASSERT(copy.image_extent.depth == 1);
- Tegra::Texture::ASTC::Decompress(input.subspan(copy.buffer_offset),
- copy.image_extent.width, copy.image_extent.height,
- copy.image_subresource.num_layers, tile_size.width,
- tile_size.height, output.subspan(output_offset));
+ Tegra::Texture::ASTC::Decompress(
+ input.subspan(copy.buffer_offset), copy.image_extent.width,
+ copy.image_extent.height,
+ copy.image_subresource.num_layers * copy.image_extent.depth, tile_size.width,
+ tile_size.height, output.subspan(output_offset));
} else {
DecompressBC4(input.subspan(copy.buffer_offset), copy.image_extent,
output.subspan(output_offset));
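
Dropping the depth == 1 assertion, the decoder now folds depth slices into the layer count, which holds because ASTC blocks never span slices. For a hypothetical 3D copy:

    // num_layers = 1, image_extent.depth = 4
    // Decompress(..., width, height, 1 * 4 /* slice-layers */, ...)
    // decodes four width x height slices back to back.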
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 6ffca2af2..161f050b8 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -1009,6 +1009,8 @@ void Device::CollectPhysicalMemoryInfo() {
device_access_memory += mem_properties.memoryHeaps[element].size;
}
if (!is_integrated) {
+ const u64 reserve_memory = std::min<u64>(device_access_memory / 8, 1_GiB);
+ device_access_memory -= reserve_memory;
return;
}
const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage);
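
On discrete GPUs the reported budget now withholds an eighth of VRAM, capped at 1 GiB, as headroom for the driver and other processes. For example:

    // 8 GiB card: reserve = min(8 GiB / 8, 1 GiB) = 1 GiB   -> 7 GiB usable
    // 4 GiB card: reserve = min(4 GiB / 8, 1 GiB) = 512 MiB -> 3.5 GiB usable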
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 7d5018151..5f1c63ff9 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -10,6 +10,7 @@
#include <vector>
#include "common/common_types.h"
+#include "common/settings.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
// Define all features which may be used by the implementation here.
@@ -510,7 +511,7 @@ public:
/// Returns true when a known debugging tool is attached.
bool HasDebuggingToolAttached() const {
- return has_renderdoc || has_nsight_graphics;
+ return has_renderdoc || has_nsight_graphics || Settings::values.renderer_debug.GetValue();
}
/// Returns true when the device does not properly support cube compatibility.
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
index 1732866e0..e28a556f8 100644
--- a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
@@ -147,7 +147,7 @@ public:
/// Returns whether this allocation is compatible with the arguments.
[[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const {
- return (flags & property_flags) == property_flags && (type_mask & shifted_memory_type) != 0;
+ return (flags & property_flags) == flags && (type_mask & shifted_memory_type) != 0;
}
private:
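
The corrected predicate asks whether the requested flags are a subset of the allocation's property_flags rather than the reverse, which wrongly rejected allocations that merely had extra properties. For instance, with property_flags = DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT:

    // flags = HOST_VISIBLE | HOST_COHERENT
    // old: (flags & property_flags) == property_flags -> false, rejected
    // new: (flags & property_flags) == flags          -> true,  accepted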