Diffstat (limited to 'src/video_core/buffer_cache/buffer_cache.h')
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h  176
1 file changed, 82 insertions(+), 94 deletions(-)
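
Taken together, the hunks below drop the per-frame active_async_buffers toggle, the WaitOnAsyncFlushes pre-wait and the pending_ranges bookkeeping, and add a GetFlushArea query that returns a page-aligned download area plus a flag indicating whether the range can be flushed pre-emptively (the Settings::IsGPULevelHigh check moves into CommitAsyncFlushesHigh). The snippet that follows is only a rough sketch of that alignment/flag logic under stated assumptions, not the yuzu implementation: the page-size constant, the DownloadArea struct and the two boolean parameters stand in for Core::Memory::YUZU_PAGESIZE, VideoCore::RasterizerDownloadArea and the memory_tracker queries used by the real GetFlushArea.

#include <cstdint>
#include <optional>

namespace sketch {

// Assumed 4 KiB page size standing in for Core::Memory::YUZU_PAGESIZE.
constexpr std::uint64_t kPageSize = 0x1000;

// Simplified stand-in for VideoCore::RasterizerDownloadArea
// (the member added in this change is spelled "preemtive" in the real struct).
struct DownloadArea {
    std::uint64_t start_address;
    std::uint64_t end_address;
    bool preemptive;
};

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value - (value % align);
}

constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return AlignDown(value + align - 1, align);
}

// is_preflushable / is_gpu_modified stand in for the memory_tracker queries.
// The real GetFlushArea also marks a not-yet-preflushable region as
// preflushable; that side effect is omitted in this sketch.
inline std::optional<DownloadArea> GetFlushArea(std::uint64_t cpu_addr, std::uint64_t size,
                                                bool is_preflushable, bool is_gpu_modified) {
    DownloadArea area{};
    area.start_address = AlignDown(cpu_addr, kPageSize);
    area.end_address = AlignUp(cpu_addr + size, kPageSize);
    // Pre-emptive if the tracker already flagged the range, or if the GPU has
    // not written to it yet; otherwise the flush must wait for GPU data.
    area.preemptive = is_preflushable || !is_gpu_modified;
    return area;
}

} // namespace sketch
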
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..98756e4da 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
common_ranges.clear();
inline_buffer_id = NULL_BUFFER_ID;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
if (!runtime.CanReportMemoryUsage()) {
minimum_memory = DEFAULT_EXPECTED_MEMORY;
critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
uniform_cache_hits[0] = 0;
uniform_cache_shots[0] = 0;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
const bool skip_preferred = hits * 256 < shots * 251;
uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
@@ -100,51 +96,50 @@ void BufferCache<P>::TickFrame() {
template <class P>
void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
- memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) {
const IntervalType subtract_interval{cpu_addr, cpu_addr + size};
ClearDownload(subtract_interval);
common_ranges.subtract(subtract_interval);
}
+ memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
}
template <class P>
void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
memory_tracker.CachedCpuWrite(cpu_addr, size);
- const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
- Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
- cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ area.emplace();
+ VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+ VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+ area->start_address = cpu_addr_start_aligned;
+ area->end_address = cpu_addr_end_aligned;
+ if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+ area->preemtive = true;
+ return area;
+ };
+ area->preemtive =
+ !IsRegionGpuModified(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+ cpu_addr_end_aligned - cpu_addr_start_aligned);
+ return area;
}
template <class P>
void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
- WaitOnAsyncFlushes(cpu_addr, size);
ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) {
DownloadBufferMemory(buffer, cpu_addr, size);
});
}
template <class P>
-void BufferCache<P>::WaitOnAsyncFlushes(VAddr cpu_addr, u64 size) {
- bool must_wait = false;
- ForEachInOverlapCounter(async_downloads, cpu_addr, size,
- [&](VAddr, VAddr, int) { must_wait = true; });
- bool must_release = false;
- ForEachInRangeSet(pending_ranges, cpu_addr, size, [&](VAddr, VAddr) { must_release = true; });
- if (must_release) {
- std::function<void()> tmp([]() {});
- rasterizer.SignalFence(std::move(tmp));
- }
- if (must_wait || must_release) {
- rasterizer.ReleaseFences();
- }
-}
-
-template <class P>
void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1024);
uncommitted_ranges.subtract(subtract_interval);
- pending_ranges.subtract(subtract_interval);
for (auto& interval_set : committed_ranges) {
interval_set.subtract(subtract_interval);
}
@@ -164,7 +159,6 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
}
const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
- WaitOnAsyncFlushes(*cpu_src_address, static_cast<u32>(amount));
ClearDownload(subtract_interval);
BufferId buffer_a;
@@ -192,7 +186,6 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -205,7 +198,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
if (has_new_downloads) {
memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
}
- std::vector<u8> tmp_buffer(amount);
+ tmp_buffer.resize(amount);
cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
return true;
@@ -441,9 +434,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
template <class P>
void BufferCache<P>::FlushCachedWrites() {
- cached_write_buffer_ids.clear();
memory_tracker.FlushCachedWrites();
- cached_ranges.clear();
}
template <class P>
@@ -474,15 +465,13 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
if (committed_ranges.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
MICROPROFILE_SCOPE(GPU_DownloadMemory);
- pending_ranges.clear();
auto it = committed_ranges.begin();
while (it != committed_ranges.end()) {
auto& current_intervals = *it;
@@ -537,64 +526,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
committed_ranges.clear();
if (downloads.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
- if (active_async_buffers) {
- if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
- boost::container::small_vector<BufferCopy, 4> normalized_copies;
- IntervalSet new_async_range{};
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- BufferCopy second_copy{copy};
- Buffer& buffer = slot_buffers[buffer_id];
- second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
- VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
- const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
- async_downloads += std::make_pair(base_interval, 1);
- runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
- normalized_copies.push_back(second_copy);
- }
- runtime.PostCopyBarrier();
- pending_downloads.emplace_back(std::move(normalized_copies));
- async_buffers.emplace_back(download_staging);
- } else {
- committed_ranges.clear();
- uncommitted_ranges.clear();
+ if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+ boost::container::small_vector<BufferCopy, 4> normalized_copies;
+ IntervalSet new_async_range{};
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ BufferCopy second_copy{copy};
+ Buffer& buffer = slot_buffers[buffer_id];
+ second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
+ VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+ const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+ async_downloads += std::make_pair(base_interval, 1);
+ runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+ normalized_copies.push_back(second_copy);
}
+ runtime.PostCopyBarrier();
+ pending_downloads.emplace_back(std::move(normalized_copies));
+ async_buffers.emplace_back(download_staging);
} else {
- if constexpr (USE_MEMORY_MAPS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- // Have in mind the staging buffer offset for the copy
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
- }
- runtime.PostCopyBarrier();
- runtime.Finish();
- for (const auto& [copy, buffer_id] : downloads) {
- const Buffer& buffer = slot_buffers[buffer_id];
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- // Undo the modified offset
- const u64 dst_offset = copy.dst_offset - download_staging.offset;
- const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
- }
+ if (!Settings::IsGPULevelHigh()) {
+ committed_ranges.clear();
+ uncommitted_ranges.clear();
} else {
- const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
- for (const auto& [copy, buffer_id] : downloads) {
- Buffer& buffer = slot_buffers[buffer_id];
- buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ if constexpr (USE_MEMORY_MAPS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ // Have in mind the staging buffer offset for the copy
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+ false);
+ }
+ runtime.PostCopyBarrier();
+ runtime.Finish();
+ for (const auto& [copy, buffer_id] : downloads) {
+ const Buffer& buffer = slot_buffers[buffer_id];
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ // Undo the modified offset
+ const u64 dst_offset = copy.dst_offset - download_staging.offset;
+ const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+ }
+ } else {
+ const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+ for (const auto& [copy, buffer_id] : downloads) {
+ Buffer& buffer = slot_buffers[buffer_id];
+ buffer.ImmediateDownload(copy.src_offset,
+ immediate_buffer.subspan(0, copy.size));
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ }
}
}
}
@@ -1213,16 +1203,14 @@ void BufferCache<P>::UpdateComputeTextureBuffers() {
template <class P>
void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) {
- memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
-
if (memory_tracker.IsRegionCpuModified(cpu_addr, size)) {
SynchronizeBuffer(slot_buffers[buffer_id], cpu_addr, size);
}
+ memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
uncommitted_ranges.add(base_interval);
- pending_ranges.add(base_interval);
}
template <class P>
@@ -1629,7 +1617,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
replace(transform_feedback_buffers);
replace(compute_uniform_buffers);
replace(compute_storage_buffers);
- std::erase(cached_write_buffer_ids, buffer_id);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
@@ -1668,14 +1655,15 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
const bool is_nvn_cbuf = cbuf_index == 0;
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
if (is_nvn_cbuf) {
- return gpu_memory->Read<u32>(ssbo_addr + 8);
+ const u32 ssbo_size = gpu_memory->Read<u32>(ssbo_addr + 8);
+ if (ssbo_size != 0) {
+ return ssbo_size;
+ }
}
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom defined
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
- LOG_INFO(HW_GPU, "Binding storage buffer for cbuf index {}, MemoryLayoutSize 0x{:X}",
- cbuf_index, memory_layout_size);
return memory_layout_size;
}();
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);