From 0a2536a0df1f4aea406f2132d3edda0430acc9d1 Mon Sep 17 00:00:00 2001 From: Fernando Sahmkow Date: Mon, 25 Dec 2023 07:32:16 +0100 Subject: SMMU: Initial adaptation to video_core. --- src/video_core/buffer_cache/buffer_base.h | 3 +- src/video_core/buffer_cache/buffer_cache.h | 450 +++++++++++----------- src/video_core/buffer_cache/buffer_cache_base.h | 98 +++-- src/video_core/buffer_cache/memory_tracker_base.h | 18 +- src/video_core/buffer_cache/word_manager.h | 24 +- 5 files changed, 304 insertions(+), 289 deletions(-) (limited to 'src/video_core/buffer_cache') diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h index 0bb3bf8ae..40e98e395 100644 --- a/src/video_core/buffer_cache/buffer_base.h +++ b/src/video_core/buffer_cache/buffer_base.h @@ -33,13 +33,12 @@ struct NullBufferParams {}; * * The buffer size and address is forcefully aligned to CPU page boundaries. */ -template class BufferBase { public: static constexpr u64 BASE_PAGE_BITS = 16; static constexpr u64 BASE_PAGE_SIZE = 1ULL << BASE_PAGE_BITS; - explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes_) + explicit BufferBase(VAddr cpu_addr_, u64 size_bytes_) : cpu_addr{cpu_addr_}, size_bytes{size_bytes_} {} explicit BufferBase(NullBufferParams) {} diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 6d1fc3887..6fe2e8b93 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -8,16 +8,16 @@ #include #include "video_core/buffer_cache/buffer_cache_base.h" +#include "video_core/guest_memory.h" +#include "video_core/host1x/gpu_device_memory_manager.h" namespace VideoCommon { using Core::Memory::YUZU_PAGESIZE; template -BufferCache

::BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, Runtime& runtime_) - : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, memory_tracker{ - rasterizer} { +BufferCache

::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_) + : runtime{runtime_}, device_memory{device_memory_}, memory_tracker{device_memory} { // Ensure the first slot is used for the null buffer void(slot_buffers.insert(runtime, NullBufferParams{})); common_ranges.clear(); @@ -29,17 +29,17 @@ BufferCache

::BufferCache(VideoCore::RasterizerInterface& rasterizer_, return; } - const s64 device_memory = static_cast(runtime.GetDeviceLocalMemory()); - const s64 min_spacing_expected = device_memory - 1_GiB; - const s64 min_spacing_critical = device_memory - 512_MiB; - const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD); + const s64 device_local_memory = static_cast(runtime.GetDeviceLocalMemory()); + const s64 min_spacing_expected = device_local_memory - 1_GiB; + const s64 min_spacing_critical = device_local_memory - 512_MiB; + const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD); const s64 min_vacancy_expected = (6 * mem_threshold) / 10; const s64 min_vacancy_critical = (3 * mem_threshold) / 10; minimum_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_expected, min_spacing_expected), + std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected), DEFAULT_EXPECTED_MEMORY)); critical_memory = static_cast( - std::max(std::min(device_memory - min_vacancy_critical, min_spacing_critical), + std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical), DEFAULT_CRITICAL_MEMORY)); } @@ -105,71 +105,72 @@ void BufferCache
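
The budget arithmetic in the hunk above works like this: minimum_memory ends up as device memory minus the larger of 1 GiB and 60% of the capped threshold, and critical_memory as device memory minus the larger of 512 MiB and 30% of it, each clamped to a default floor. TARGET_THRESHOLD, DEFAULT_EXPECTED_MEMORY and DEFAULT_CRITICAL_MEMORY are defined elsewhere in the cache and are not visible here, so the standalone sketch below uses placeholder values purely to make the numbers concrete for a hypothetical 6 GiB device-local heap.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        using s64 = std::int64_t;
        constexpr s64 GiB = s64{1} << 30;
        constexpr s64 MiB = s64{1} << 20;

        // Placeholder values; the real constants are defined elsewhere in the cache.
        constexpr s64 TARGET_THRESHOLD = 4 * GiB;
        constexpr s64 DEFAULT_EXPECTED_MEMORY = 1 * GiB;
        constexpr s64 DEFAULT_CRITICAL_MEMORY = 2 * GiB;

        const s64 device_local_memory = 6 * GiB; // hypothetical GPU heap

        // Same arithmetic as the constructor in this hunk.
        const s64 min_spacing_expected = device_local_memory - 1 * GiB;
        const s64 min_spacing_critical = device_local_memory - 512 * MiB;
        const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
        const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
        const s64 min_vacancy_critical = (3 * mem_threshold) / 10;

        const s64 minimum_memory = std::max(
            std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
            DEFAULT_EXPECTED_MEMORY);
        const s64 critical_memory = std::max(
            std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
            DEFAULT_CRITICAL_MEMORY);

        // With these numbers: trimming starts near 3.6 GiB of cache use and turns
        // aggressive near 4.8 GiB.
        std::printf("minimum=%lld MiB critical=%lld MiB\n",
                    static_cast<long long>(minimum_memory / MiB),
                    static_cast<long long>(critical_memory / MiB));
    }
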

::TickFrame() {
 }

 template <class P>
-void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
-    if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) {
-        const IntervalType subtract_interval{cpu_addr, cpu_addr + size};
+void BufferCache<P>::WriteMemory(DAddr device_addr, u64 size) {
+    if (memory_tracker.IsRegionGpuModified(device_addr, size)) {
+        const IntervalType subtract_interval{device_addr, device_addr + size};
         ClearDownload(subtract_interval);
         common_ranges.subtract(subtract_interval);
     }
-    memory_tracker.MarkRegionAsCpuModified(cpu_addr, size);
+    memory_tracker.MarkRegionAsCpuModified(device_addr, size);
 }

 template <class P>
-void BufferCache<P>

::CachedWriteMemory(VAddr cpu_addr, u64 size) { - const bool is_dirty = IsRegionRegistered(cpu_addr, size); +void BufferCache

::CachedWriteMemory(DAddr device_addr, u64 size) { + const bool is_dirty = IsRegionRegistered(device_addr, size); if (!is_dirty) { return; } - VAddr aligned_start = Common::AlignDown(cpu_addr, YUZU_PAGESIZE); - VAddr aligned_end = Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(device_addr, YUZU_PAGESIZE); + DAddr aligned_end = Common::AlignUp(device_addr + size, YUZU_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { - WriteMemory(cpu_addr, size); + WriteMemory(device_addr, size); return; } tmp_buffer.resize_destructive(size); - cpu_memory.ReadBlockUnsafe(cpu_addr, tmp_buffer.data(), size); + device_memory.ReadBlockUnsafe(device_addr, tmp_buffer.data(), size); - InlineMemoryImplementation(cpu_addr, size, tmp_buffer); + InlineMemoryImplementation(device_addr, size, tmp_buffer); } template -bool BufferCache

::OnCPUWrite(VAddr cpu_addr, u64 size) { - const bool is_dirty = IsRegionRegistered(cpu_addr, size); +bool BufferCache

::OnCPUWrite(DAddr device_addr, u64 size) { + const bool is_dirty = IsRegionRegistered(device_addr, size); if (!is_dirty) { return false; } - if (memory_tracker.IsRegionGpuModified(cpu_addr, size)) { + if (memory_tracker.IsRegionGpuModified(device_addr, size)) { return true; } - WriteMemory(cpu_addr, size); + WriteMemory(device_addr, size); return false; } template -std::optional BufferCache

::GetFlushArea(VAddr cpu_addr, +std::optional BufferCache

::GetFlushArea(DAddr device_addr, u64 size) { std::optional area{}; area.emplace(); - VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE); - VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE); - area->start_address = cpu_addr_start_aligned; - area->end_address = cpu_addr_end_aligned; - if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) { + DAddr device_addr_start_aligned = Common::AlignDown(device_addr, Core::Memory::YUZU_PAGESIZE); + DAddr device_addr_end_aligned = + Common::AlignUp(device_addr + size, Core::Memory::YUZU_PAGESIZE); + area->start_address = device_addr_start_aligned; + area->end_address = device_addr_end_aligned; + if (memory_tracker.IsRegionPreflushable(device_addr, size)) { area->preemtive = true; return area; }; - area->preemtive = - !IsRegionGpuModified(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned); - memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, - cpu_addr_end_aligned - cpu_addr_start_aligned); + area->preemtive = !IsRegionGpuModified(device_addr_start_aligned, + device_addr_end_aligned - device_addr_start_aligned); + memory_tracker.MarkRegionAsPreflushable(device_addr_start_aligned, + device_addr_end_aligned - device_addr_start_aligned); return area; } template -void BufferCache

::DownloadMemory(VAddr cpu_addr, u64 size) { - ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) { - DownloadBufferMemory(buffer, cpu_addr, size); +void BufferCache

::DownloadMemory(DAddr device_addr, u64 size) { + ForEachBufferInRange(device_addr, size, [&](BufferId, Buffer& buffer) { + DownloadBufferMemory(buffer, device_addr, size); }); } @@ -184,8 +185,8 @@ void BufferCache

::ClearDownload(IntervalType subtract_interval) { template bool BufferCache

::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { - const std::optional cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); - const std::optional cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); + const std::optional cpu_src_address = gpu_memory->GpuToCpuAddress(src_address); + const std::optional cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address); if (!cpu_src_address || !cpu_dest_address) { return false; } @@ -216,10 +217,10 @@ bool BufferCache

::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am }}; boost::container::small_vector tmp_intervals; - auto mirror = [&](VAddr base_address, VAddr base_address_end) { + auto mirror = [&](DAddr base_address, DAddr base_address_end) { const u64 size = base_address_end - base_address; - const VAddr diff = base_address - *cpu_src_address; - const VAddr new_base_address = *cpu_dest_address + diff; + const DAddr diff = base_address - *cpu_src_address; + const DAddr new_base_address = *cpu_dest_address + diff; const IntervalType add_interval{new_base_address, new_base_address + size}; tmp_intervals.push_back(add_interval); uncommitted_ranges.add(add_interval); @@ -239,15 +240,15 @@ bool BufferCache

::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount); } - Core::Memory::CpuGuestMemoryScoped tmp( - cpu_memory, *cpu_src_address, amount, &tmp_buffer); + Tegra::Memory::DeviceGuestMemoryScoped tmp( + device_memory, *cpu_src_address, amount, &tmp_buffer); tmp.SetAddressAndSize(*cpu_dest_address, amount); return true; } template bool BufferCache

::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { - const std::optional cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); + const std::optional cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address); if (!cpu_dst_address) { return false; } @@ -273,23 +274,23 @@ template std::pair BufferCache

::ObtainBuffer(GPUVAddr gpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (!cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (!device_addr) { return {&slot_buffers[NULL_BUFFER_ID], 0}; } - return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op); + return ObtainCPUBuffer(*device_addr, size, sync_info, post_op); } template std::pair BufferCache

::ObtainCPUBuffer( - VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { - const BufferId buffer_id = FindBuffer(cpu_addr, size); + DAddr device_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { + const BufferId buffer_id = FindBuffer(device_addr, size); Buffer& buffer = slot_buffers[buffer_id]; // synchronize op switch (sync_info) { case ObtainBufferSynchronize::FullSynchronize: - SynchronizeBuffer(buffer, cpu_addr, size); + SynchronizeBuffer(buffer, device_addr, size); break; default: break; @@ -297,12 +298,12 @@ std::pair BufferCache

::ObtainCPUBuffer( switch (post_op) { case ObtainBufferOperation::MarkAsWritten: - MarkWrittenBuffer(buffer_id, cpu_addr, size); + MarkWrittenBuffer(buffer_id, device_addr, size); break; case ObtainBufferOperation::DiscardWrite: { - VAddr cpu_addr_start = Common::AlignDown(cpu_addr, 64); - VAddr cpu_addr_end = Common::AlignUp(cpu_addr + size, 64); - IntervalType interval{cpu_addr_start, cpu_addr_end}; + DAddr device_addr_start = Common::AlignDown(device_addr, 64); + DAddr device_addr_end = Common::AlignUp(device_addr + size, 64); + IntervalType interval{device_addr_start, device_addr_end}; ClearDownload(interval); common_ranges.subtract(interval); break; @@ -311,15 +312,15 @@ std::pair BufferCache

::ObtainCPUBuffer( break; } - return {&buffer, buffer.Offset(cpu_addr)}; + return {&buffer, buffer.Offset(device_addr)}; } template void BufferCache

::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); const Binding binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = BufferId{}, }; @@ -555,16 +556,17 @@ void BufferCache

::CommitAsyncFlushesHigh() { for (const IntervalSet& intervals : committed_ranges) { for (auto& interval : intervals) { const std::size_t size = interval.upper() - interval.lower(); - const VAddr cpu_addr = interval.lower(); - ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) { - const VAddr buffer_start = buffer.CpuAddr(); - const VAddr buffer_end = buffer_start + buffer.SizeBytes(); - const VAddr new_start = std::max(buffer_start, cpu_addr); - const VAddr new_end = std::min(buffer_end, cpu_addr + size); + const DAddr device_addr = interval.lower(); + ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) { + const DAddr buffer_start = buffer.CpuAddr(); + const DAddr buffer_end = buffer_start + buffer.SizeBytes(); + const DAddr new_start = std::max(buffer_start, device_addr); + const DAddr new_end = std::min(buffer_end, device_addr + size); memory_tracker.ForEachDownloadRange( - new_start, new_end - new_start, false, [&](u64 cpu_addr_out, u64 range_size) { - const VAddr buffer_addr = buffer.CpuAddr(); - const auto add_download = [&](VAddr start, VAddr end) { + new_start, new_end - new_start, false, + [&](u64 device_addr_out, u64 range_size) { + const DAddr buffer_addr = buffer.CpuAddr(); + const auto add_download = [&](DAddr start, DAddr end) { const u64 new_offset = start - buffer_addr; const u64 new_size = end - start; downloads.push_back({ @@ -582,7 +584,7 @@ void BufferCache

::CommitAsyncFlushesHigh() { largest_copy = std::max(largest_copy, new_size); }; - ForEachInRangeSet(common_ranges, cpu_addr_out, range_size, add_download); + ForEachInRangeSet(common_ranges, device_addr_out, range_size, add_download); }); }); } @@ -605,8 +607,8 @@ void BufferCache

::CommitAsyncFlushesHigh() { BufferCopy second_copy{copy}; Buffer& buffer = slot_buffers[buffer_id]; second_copy.src_offset = static_cast(buffer.CpuAddr()) + copy.src_offset; - VAddr orig_cpu_addr = static_cast(second_copy.src_offset); - const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size}; + DAddr orig_device_addr = static_cast(second_copy.src_offset); + const IntervalType base_interval{orig_device_addr, orig_device_addr + copy.size}; async_downloads += std::make_pair(base_interval, 1); buffer.MarkUsage(copy.src_offset, copy.size); runtime.CopyBuffer(download_staging.buffer, buffer, copies, false); @@ -635,11 +637,11 @@ void BufferCache

::CommitAsyncFlushesHigh() { runtime.Finish(); for (const auto& [copy, buffer_id] : downloads) { const Buffer& buffer = slot_buffers[buffer_id]; - const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset; + const DAddr device_addr = buffer.CpuAddr() + copy.src_offset; // Undo the modified offset const u64 dst_offset = copy.dst_offset - download_staging.offset; const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset; - cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size); + device_memory.WriteBlockUnsafe(device_addr, read_mapped_memory, copy.size); } } else { const std::span immediate_buffer = ImmediateBuffer(largest_copy); @@ -647,8 +649,8 @@ void BufferCache

::CommitAsyncFlushesHigh() { Buffer& buffer = slot_buffers[buffer_id]; buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size)); - const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset; - cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.src_offset; + device_memory.WriteBlockUnsafe(device_addr, immediate_buffer.data(), copy.size); } } } @@ -681,19 +683,19 @@ void BufferCache

::PopAsyncBuffers() { u8* base = async_buffer->mapped_span.data(); const size_t base_offset = async_buffer->offset; for (const auto& copy : downloads) { - const VAddr cpu_addr = static_cast(copy.src_offset); + const DAddr device_addr = static_cast(copy.src_offset); const u64 dst_offset = copy.dst_offset - base_offset; const u8* read_mapped_memory = base + dst_offset; ForEachInOverlapCounter( - async_downloads, cpu_addr, copy.size, [&](VAddr start, VAddr end, int count) { - cpu_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - cpu_addr], - end - start); + async_downloads, device_addr, copy.size, [&](DAddr start, DAddr end, int count) { + device_memory.WriteBlockUnsafe(start, &read_mapped_memory[start - device_addr], + end - start); if (count == 1) { const IntervalType base_interval{start, end}; common_ranges.subtract(base_interval); } }); - const IntervalType subtract_interval{cpu_addr, cpu_addr + copy.size}; + const IntervalType subtract_interval{device_addr, device_addr + copy.size}; RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1); } async_buffers_death_ring.emplace_back(*async_buffer); @@ -703,15 +705,15 @@ void BufferCache
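
The asynchronous download path keeps a per-byte reference count of in-flight downloads in async_downloads, an interval map of ints keyed on device addresses: CommitAsyncFlushesHigh adds +1 over each queued range, and PopAsyncBuffers above writes the results back through device_memory and subtracts the count again, only dropping a range from common_ranges once the last overlapping download retires (count == 1). A minimal sketch of that counting behaviour with boost::icl, outside the cache's own type aliases:

    #include <boost/icl/split_interval_map.hpp>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    using DAddr = std::uint64_t;
    using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
    using IntervalType = OverlapCounter::interval_type;

    int main() {
        OverlapCounter async_downloads;

        // Two overlapping downloads get queued; the overlap is counted twice.
        async_downloads += std::make_pair(IntervalType::right_open(0x0000, 0x2000), 1);
        async_downloads += std::make_pair(IntervalType::right_open(0x1000, 0x3000), 1);

        // The first download completes: its count is removed over its whole range.
        async_downloads += std::make_pair(IntervalType::right_open(0x0000, 0x2000), -1);

        // [0x0000, 0x1000) fell to zero and disappears from the map; the rest still
        // has one outstanding download and must not be forgotten yet.
        for (const auto& [interval, count] : async_downloads) {
            std::printf("[0x%llx, 0x%llx) -> %d\n",
                        static_cast<unsigned long long>(interval.lower()),
                        static_cast<unsigned long long>(interval.upper()), count);
        }
    }
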

::PopAsyncBuffers() { } template -bool BufferCache

::IsRegionGpuModified(VAddr addr, size_t size) { +bool BufferCache

::IsRegionGpuModified(DAddr addr, size_t size) { bool is_dirty = false; - ForEachInRangeSet(common_ranges, addr, size, [&](VAddr, VAddr) { is_dirty = true; }); + ForEachInRangeSet(common_ranges, addr, size, [&](DAddr, DAddr) { is_dirty = true; }); return is_dirty; } template -bool BufferCache

::IsRegionRegistered(VAddr addr, size_t size) { - const VAddr end_addr = addr + size; +bool BufferCache

::IsRegionRegistered(DAddr addr, size_t size) { + const DAddr end_addr = addr + size; const u64 page_end = Common::DivCeil(end_addr, CACHING_PAGESIZE); for (u64 page = addr >> CACHING_PAGEBITS; page < page_end;) { const BufferId buffer_id = page_table[page]; @@ -720,8 +722,8 @@ bool BufferCache

::IsRegionRegistered(VAddr addr, size_t size) { continue; } Buffer& buffer = slot_buffers[buffer_id]; - const VAddr buf_start_addr = buffer.CpuAddr(); - const VAddr buf_end_addr = buf_start_addr + buffer.SizeBytes(); + const DAddr buf_start_addr = buffer.CpuAddr(); + const DAddr buf_end_addr = buf_start_addr + buffer.SizeBytes(); if (buf_start_addr < end_addr && addr < buf_end_addr) { return true; } @@ -731,7 +733,7 @@ bool BufferCache

::IsRegionRegistered(VAddr addr, size_t size) { } template -bool BufferCache

::IsRegionCpuModified(VAddr addr, size_t size) { +bool BufferCache

::IsRegionCpuModified(DAddr addr, size_t size) { return memory_tracker.IsRegionCpuModified(addr, size); } @@ -739,7 +741,7 @@ template void BufferCache

::BindHostIndexBuffer() { Buffer& buffer = slot_buffers[channel_state->index_buffer.buffer_id]; TouchBuffer(buffer, channel_state->index_buffer.buffer_id); - const u32 offset = buffer.Offset(channel_state->index_buffer.cpu_addr); + const u32 offset = buffer.Offset(channel_state->index_buffer.device_addr); const u32 size = channel_state->index_buffer.size; const auto& draw_state = maxwell3d->draw_manager->GetDrawState(); if (!draw_state.inline_index_draw_indexes.empty()) [[unlikely]] { @@ -754,7 +756,7 @@ void BufferCache

::BindHostIndexBuffer() { buffer.ImmediateUpload(0, draw_state.inline_index_draw_indexes); } } else { - SynchronizeBuffer(buffer, channel_state->index_buffer.cpu_addr, size); + SynchronizeBuffer(buffer, channel_state->index_buffer.device_addr, size); } if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { const u32 new_offset = @@ -777,7 +779,7 @@ void BufferCache

::BindHostVertexBuffers() { const Binding& binding = channel_state->vertex_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); - SynchronizeBuffer(buffer, binding.cpu_addr, binding.size); + SynchronizeBuffer(buffer, binding.device_addr, binding.size); if (!flags[Dirty::VertexBuffer0 + index]) { continue; } @@ -797,7 +799,7 @@ void BufferCache

::BindHostVertexBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 stride = maxwell3d->regs.vertex_streams[index].stride; - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, binding.size); host_bindings.buffers.push_back(&buffer); @@ -814,7 +816,7 @@ void BufferCache

::BindHostDrawIndirectBuffers() { const auto bind_buffer = [this](const Binding& binding) { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); - SynchronizeBuffer(buffer, binding.cpu_addr, binding.size); + SynchronizeBuffer(buffer, binding.device_addr, binding.size); }; if (current_draw_indirect->include_count) { bind_buffer(channel_state->count_buffer_binding); @@ -842,13 +844,13 @@ template void BufferCache

::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index, bool needs_bind) { const Binding& binding = channel_state->uniform_buffers[stage][index]; - const VAddr cpu_addr = binding.cpu_addr; + const DAddr device_addr = binding.device_addr; const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]); Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID && size <= channel_state->uniform_buffer_skip_cache_size && - !memory_tracker.IsRegionGpuModified(cpu_addr, size); + !memory_tracker.IsRegionGpuModified(device_addr, size); if (use_fast_buffer) { if constexpr (IS_OPENGL) { if (runtime.HasFastBufferSubData()) { @@ -862,7 +864,7 @@ void BufferCache

::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size; runtime.BindFastUniformBuffer(stage, binding_index, size); } - const auto span = ImmediateBufferWithData(cpu_addr, size); + const auto span = ImmediateBufferWithData(device_addr, size); runtime.PushFastUniformBuffer(stage, binding_index, span); return; } @@ -873,11 +875,11 @@ void BufferCache

::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 } // Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan const std::span span = runtime.BindMappedUniformBuffer(stage, binding_index, size); - cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size); + device_memory.ReadBlockUnsafe(device_addr, span.data(), size); return; } // Classic cached path - const bool sync_cached = SynchronizeBuffer(buffer, cpu_addr, size); + const bool sync_cached = SynchronizeBuffer(buffer, device_addr, size); if (sync_cached) { ++channel_state->uniform_cache_hits[0]; } @@ -892,7 +894,7 @@ void BufferCache

::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 if (!needs_bind) { return; } - const u32 offset = buffer.Offset(cpu_addr); + const u32 offset = buffer.Offset(device_addr); if constexpr (IS_OPENGL) { // Fast buffer will be unbound channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index); @@ -920,14 +922,14 @@ void BufferCache

::BindHostGraphicsStorageBuffers(size_t stage) { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); const bool is_written = ((channel_state->written_storage_buffers[stage] >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } if constexpr (NEEDS_BIND_STORAGE_INDEX) { @@ -945,14 +947,14 @@ void BufferCache

::BindHostGraphicsTextureBuffers(size_t stage) { const TextureBufferBinding& binding = channel_state->texture_buffers[stage][index]; Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); const bool is_written = ((channel_state->written_texture_buffers[stage] >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); const PixelFormat format = binding.format; buffer.MarkUsage(offset, size); if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) { @@ -982,11 +984,11 @@ void BufferCache

::BindHostTransformFeedbackBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); host_bindings.buffers.push_back(&buffer); host_bindings.offsets.push_back(offset); @@ -1011,9 +1013,9 @@ void BufferCache

::BindHostComputeUniformBuffers() { TouchBuffer(buffer, binding.buffer_id); const u32 size = std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]); - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); if constexpr (NEEDS_BIND_UNIFORM_INDEX) { runtime.BindComputeUniformBuffer(binding_index, buffer, offset, size); @@ -1032,15 +1034,15 @@ void BufferCache

::BindHostComputeStorageBuffers() { Buffer& buffer = slot_buffers[binding.buffer_id]; TouchBuffer(buffer, binding.buffer_id); const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); buffer.MarkUsage(offset, size); const bool is_written = ((channel_state->written_compute_storage_buffers >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } if constexpr (NEEDS_BIND_STORAGE_INDEX) { @@ -1058,15 +1060,15 @@ void BufferCache

::BindHostComputeTextureBuffers() { const TextureBufferBinding& binding = channel_state->compute_texture_buffers[index]; Buffer& buffer = slot_buffers[binding.buffer_id]; const u32 size = binding.size; - SynchronizeBuffer(buffer, binding.cpu_addr, size); + SynchronizeBuffer(buffer, binding.device_addr, size); const bool is_written = ((channel_state->written_compute_texture_buffers >> index) & 1) != 0; if (is_written) { - MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, size); + MarkWrittenBuffer(binding.buffer_id, binding.device_addr, size); } - const u32 offset = buffer.Offset(binding.cpu_addr); + const u32 offset = buffer.Offset(binding.device_addr); const PixelFormat format = binding.format; buffer.MarkUsage(offset, size); if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) { @@ -1131,7 +1133,7 @@ void BufferCache

::UpdateIndexBuffer() { inline_buffer_id = CreateBuffer(0, buffer_size); } channel_state->index_buffer = Binding{ - .cpu_addr = 0, + .device_addr = 0, .size = inline_index_size, .buffer_id = inline_buffer_id, }; @@ -1140,19 +1142,19 @@ void BufferCache

::UpdateIndexBuffer() { const GPUVAddr gpu_addr_begin = index_buffer_ref.StartAddress(); const GPUVAddr gpu_addr_end = index_buffer_ref.EndAddress(); - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); const u32 draw_size = (index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes(); const u32 size = std::min(address_size, draw_size); - if (size == 0 || !cpu_addr) { + if (size == 0 || !device_addr) { channel_state->index_buffer = NULL_BINDING; return; } channel_state->index_buffer = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, - .buffer_id = FindBuffer(*cpu_addr, size), + .buffer_id = FindBuffer(*device_addr, size), }; } @@ -1178,19 +1180,19 @@ void BufferCache

::UpdateVertexBuffer(u32 index) { const auto& limit = maxwell3d->regs.vertex_stream_limits[index]; const GPUVAddr gpu_addr_begin = array.Address(); const GPUVAddr gpu_addr_end = limit.Address() + 1; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin); const u32 address_size = static_cast(gpu_addr_end - gpu_addr_begin); u32 size = address_size; // TODO: Analyze stride and number of vertices - if (array.enable == 0 || size == 0 || !cpu_addr) { + if (array.enable == 0 || size == 0 || !device_addr) { channel_state->vertex_buffers[index] = NULL_BINDING; return; } if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) { size = static_cast(gpu_memory->MaxContinuousRange(gpu_addr_begin, size)); } - const BufferId buffer_id = FindBuffer(*cpu_addr, size); + const BufferId buffer_id = FindBuffer(*device_addr, size); channel_state->vertex_buffers[index] = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = buffer_id, }; @@ -1199,15 +1201,15 @@ void BufferCache

::UpdateVertexBuffer(u32 index) { template void BufferCache

::UpdateDrawIndirect() { const auto update = [this](GPUVAddr gpu_addr, size_t size, Binding& binding) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (!cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (!device_addr) { binding = NULL_BINDING; return; } binding = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = static_cast(size), - .buffer_id = FindBuffer(*cpu_addr, static_cast(size)), + .buffer_id = FindBuffer(*device_addr, static_cast(size)), }; }; if (current_draw_indirect->include_count) { @@ -1231,7 +1233,7 @@ void BufferCache

::UpdateUniformBuffers(size_t stage) { channel_state->dirty_uniform_buffers[stage] |= 1U << index; } // Resolve buffer - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1240,7 +1242,7 @@ void BufferCache

::UpdateStorageBuffers(size_t stage) { ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) { // Resolve buffer Binding& binding = channel_state->storage_buffers[stage][index]; - const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size); + const BufferId buffer_id = FindBuffer(binding.device_addr, binding.size); binding.buffer_id = buffer_id; }); } @@ -1249,7 +1251,7 @@ template void BufferCache

::UpdateTextureBuffers(size_t stage) { ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) { Binding& binding = channel_state->texture_buffers[stage][index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1268,14 +1270,14 @@ void BufferCache

::UpdateTransformFeedbackBuffer(u32 index) { const auto& binding = maxwell3d->regs.transform_feedback.buffers[index]; const GPUVAddr gpu_addr = binding.Address() + binding.start_offset; const u32 size = binding.size; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - if (binding.enable == 0 || size == 0 || !cpu_addr) { + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + if (binding.enable == 0 || size == 0 || !device_addr) { channel_state->transform_feedback_buffers[index] = NULL_BINDING; return; } - const BufferId buffer_id = FindBuffer(*cpu_addr, size); + const BufferId buffer_id = FindBuffer(*device_addr, size); channel_state->transform_feedback_buffers[index] = Binding{ - .cpu_addr = *cpu_addr, + .device_addr = *device_addr, .size = size, .buffer_id = buffer_id, }; @@ -1289,13 +1291,13 @@ void BufferCache

::UpdateComputeUniformBuffers() { const auto& launch_desc = kepler_compute->launch_description; if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { const auto& cbuf = launch_desc.const_buffer_config[index]; - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); - if (cpu_addr) { - binding.cpu_addr = *cpu_addr; + const std::optional device_addr = gpu_memory->GpuToCpuAddress(cbuf.Address()); + if (device_addr) { + binding.device_addr = *device_addr; binding.size = cbuf.size; } } - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1304,7 +1306,7 @@ void BufferCache

::UpdateComputeStorageBuffers() { ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) { // Resolve buffer Binding& binding = channel_state->compute_storage_buffers[index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } @@ -1312,45 +1314,63 @@ template void BufferCache

::UpdateComputeTextureBuffers() { ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) { Binding& binding = channel_state->compute_texture_buffers[index]; - binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size); + binding.buffer_id = FindBuffer(binding.device_addr, binding.size); }); } template -void BufferCache

::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size) {
-    memory_tracker.MarkRegionAsGpuModified(cpu_addr, size);
+void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size) {
+    memory_tracker.MarkRegionAsGpuModified(device_addr, size);

-    const IntervalType base_interval{cpu_addr, cpu_addr + size};
+    const IntervalType base_interval{device_addr, device_addr + size};
     common_ranges.add(base_interval);
     uncommitted_ranges.add(base_interval);
 }

 template <class P>
-BufferId BufferCache<P>
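
MarkWrittenBuffer records a GPU-side write in two places: the word tracker flags the pages as GPU-modified, and the byte range is added to common_ranges (data the GPU wrote that guest memory has not seen yet) and to uncommitted_ranges (work for the next flush commit). WriteMemory performs the inverse subtraction when the CPU overwrites such a range. A small standalone sketch of that interval bookkeeping:

    #include <boost/icl/interval_set.hpp>
    #include <cstdint>
    #include <cstdio>

    using DAddr = std::uint64_t;
    using IntervalSet = boost::icl::interval_set<DAddr>;
    using IntervalType = IntervalSet::interval_type;

    int main() {
        IntervalSet common_ranges;      // GPU-written bytes not yet visible to flushes
        IntervalSet uncommitted_ranges; // what the next flush commit has to look at

        // GPU writes [0x1000, 0x5000): MarkWrittenBuffer-style bookkeeping.
        const IntervalType written{0x1000, 0x5000};
        common_ranges.add(written);
        uncommitted_ranges.add(written);

        // The CPU later overwrites [0x2000, 0x3000): WriteMemory-style bookkeeping.
        common_ranges.subtract(IntervalType{0x2000, 0x3000});

        // Remaining GPU-dirty ranges: [0x1000, 0x2000) and [0x3000, 0x5000).
        for (const auto& interval : common_ranges) {
            std::printf("[0x%llx, 0x%llx)\n",
                        static_cast<unsigned long long>(interval.lower()),
                        static_cast<unsigned long long>(interval.upper()));
        }
    }
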

::FindBuffer(VAddr cpu_addr, u32 size) {
-    if (cpu_addr == 0) {
+BufferId BufferCache<P>::FindBuffer(DAddr device_addr, u32 size) {
+    if (device_addr == 0) {
         return NULL_BUFFER_ID;
     }
-    const u64 page = cpu_addr >> CACHING_PAGEBITS;
+    const u64 page = device_addr >> CACHING_PAGEBITS;
     const BufferId buffer_id = page_table[page];
     if (!buffer_id) {
-        return CreateBuffer(cpu_addr, size);
+        return CreateBuffer(device_addr, size);
     }
     const Buffer& buffer = slot_buffers[buffer_id];
-    if (buffer.IsInBounds(cpu_addr, size)) {
+    if (buffer.IsInBounds(device_addr, size)) {
         return buffer_id;
     }
-    return CreateBuffer(cpu_addr, size);
+    return CreateBuffer(device_addr, size);
 }

 template <class P>
-typename BufferCache<P>
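
FindBuffer only inspects the page-table entry for the first caching page of the request: if a buffer is registered there and covers the whole range it is reused, otherwise CreateBuffer resolves overlaps and builds a larger one. The sketch below mirrors that lookup with plain containers; the 64 KiB caching granularity is an assumption here, the real value is CACHING_PAGEBITS in buffer_cache_base.h.

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <unordered_map>

    using DAddr = std::uint64_t;
    using BufferId = std::uint32_t; // stands in for the slot-vector id type

    // Assumed caching granularity (64 KiB pages); see CACHING_PAGEBITS for the real value.
    constexpr std::uint64_t PAGE_BITS = 16;

    struct Buffer {
        DAddr base = 0;
        std::uint64_t size = 0;
        bool IsInBounds(DAddr addr, std::uint64_t wanted) const {
            return addr >= base && addr + wanted <= base + size;
        }
    };

    std::unordered_map<std::uint64_t, BufferId> page_table; // sparse stand-in for the flat array
    std::unordered_map<BufferId, Buffer> buffers;

    // Mirrors FindBuffer: look only at the page of the start address and reuse the
    // buffer registered there when it covers the whole request.
    std::optional<BufferId> TryFindBuffer(DAddr device_addr, std::uint32_t size) {
        const auto it = page_table.find(device_addr >> PAGE_BITS);
        if (it == page_table.end()) {
            return std::nullopt; // FindBuffer would call CreateBuffer here
        }
        const Buffer& buffer = buffers.at(it->second);
        if (buffer.IsInBounds(device_addr, size)) {
            return it->second;
        }
        return std::nullopt; // registered buffer too small: CreateBuffer resolves overlaps
    }

    int main() {
        buffers[1] = Buffer{0x20000, 0x10000};
        page_table[0x20000 >> PAGE_BITS] = 1;
        std::printf("hit=%d miss=%d\n", TryFindBuffer(0x24000, 0x100).has_value(),
                    TryFindBuffer(0x24000, 0x20000).has_value());
    }
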

::OverlapResult BufferCache

::ResolveOverlaps(VAddr cpu_addr, +typename BufferCache

::OverlapResult BufferCache

::ResolveOverlaps(DAddr device_addr, u32 wanted_size) { static constexpr int STREAM_LEAP_THRESHOLD = 16; boost::container::small_vector overlap_ids; - VAddr begin = cpu_addr; - VAddr end = cpu_addr + wanted_size; + DAddr begin = device_addr; + DAddr end = device_addr + wanted_size; int stream_score = 0; bool has_stream_leap = false; + auto expand_begin = [&](DAddr add_value) { + static constexpr DAddr min_page = CACHING_PAGESIZE + Core::Memory::YUZU_PAGESIZE; + if (add_value > begin - min_page ) { + begin = min_page; + device_addr = Core::Memory::YUZU_PAGESIZE; + return; + } + begin -= add_value; + device_addr = begin - CACHING_PAGESIZE; + }; + auto expand_end = [&](DAddr add_value) { + static constexpr DAddr max_page = 1ULL << Tegra::MaxwellDeviceMemoryManager::AS_BITS; + if (add_value > max_page - end ) { + end = max_page; + return; + } + end += add_value; + }; if (begin == 0) { return OverlapResult{ .ids = std::move(overlap_ids), @@ -1359,9 +1379,9 @@ typename BufferCache

::OverlapResult BufferCache

::ResolveOverlaps(VAddr cpu .has_stream_leap = has_stream_leap, }; } - for (; cpu_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE); - cpu_addr += CACHING_PAGESIZE) { - const BufferId overlap_id = page_table[cpu_addr >> CACHING_PAGEBITS]; + for (; device_addr >> CACHING_PAGEBITS < Common::DivCeil(end, CACHING_PAGESIZE); + device_addr += CACHING_PAGESIZE) { + const BufferId overlap_id = page_table[device_addr >> CACHING_PAGEBITS]; if (!overlap_id) { continue; } @@ -1371,12 +1391,12 @@ typename BufferCache

::OverlapResult BufferCache

::ResolveOverlaps(VAddr cpu } overlap_ids.push_back(overlap_id); overlap.Pick(); - const VAddr overlap_cpu_addr = overlap.CpuAddr(); - const bool expands_left = overlap_cpu_addr < begin; + const DAddr overlap_device_addr = overlap.CpuAddr(); + const bool expands_left = overlap_device_addr < begin; if (expands_left) { - begin = overlap_cpu_addr; + begin = overlap_device_addr; } - const VAddr overlap_end = overlap_cpu_addr + overlap.SizeBytes(); + const DAddr overlap_end = overlap_device_addr + overlap.SizeBytes(); const bool expands_right = overlap_end > end; if (overlap_end > end) { end = overlap_end; @@ -1387,11 +1407,10 @@ typename BufferCache

::OverlapResult BufferCache

::ResolveOverlaps(VAddr cpu // as a stream buffer. Increase the size to skip constantly recreating buffers. has_stream_leap = true; if (expands_right) { - begin -= CACHING_PAGESIZE * 256; - cpu_addr = begin - CACHING_PAGESIZE; + expand_begin(CACHING_PAGESIZE * 128); } if (expands_left) { - end += CACHING_PAGESIZE * 256; + expand_end(CACHING_PAGESIZE * 128); } } } @@ -1424,13 +1443,13 @@ void BufferCache
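
When ResolveOverlaps detects stream-like growth it still widens the new buffer, but now by 128 caching pages instead of 256, and through the expand_begin/expand_end helpers introduced earlier in this function, which clamp the result so begin never drops below CACHING_PAGESIZE + YUZU_PAGESIZE and end never passes the device address-space limit (1 << AS_BITS). A reduced sketch of that clamping follows; the page size and address-space size are passed in because their concrete values are not part of this hunk, and the rewind of the page-walk cursor (device_addr) is omitted.

    #include <cstdint>
    #include <cstdio>

    using DAddr = std::uint64_t;

    struct GrowRange {
        DAddr begin;
        DAddr end;
    };

    // Mirrors expand_begin/expand_end: widen the range by `amount` on both sides
    // without dropping below `min_addr` or running past `max_addr`.
    // Assumes begin >= min_addr and end <= max_addr, as in the cache.
    GrowRange ExpandClamped(GrowRange range, DAddr amount, DAddr min_addr, DAddr max_addr) {
        if (amount > range.begin - min_addr) {
            range.begin = min_addr; // would underflow: pin to the lowest usable page
        } else {
            range.begin -= amount;
        }
        if (amount > max_addr - range.end) {
            range.end = max_addr; // would overflow: pin to the end of the address space
        } else {
            range.end += amount;
        }
        return range;
    }

    int main() {
        // A buffer close to the bottom of the address space cannot grow a full
        // 128 pages downwards; the begin side gets pinned instead.
        const DAddr page = 0x10000;      // assumed 64 KiB caching page
        const DAddr as_end = 1ULL << 34; // assumed device address-space size
        const GrowRange grown = ExpandClamped({0x30000, 0x90000}, 128 * page, 0x11000, as_end);
        std::printf("begin=0x%llx end=0x%llx\n",
                    static_cast<unsigned long long>(grown.begin),
                    static_cast<unsigned long long>(grown.end));
    }
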

::JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, } template -BufferId BufferCache

::CreateBuffer(VAddr cpu_addr, u32 wanted_size) { - VAddr cpu_addr_end = Common::AlignUp(cpu_addr + wanted_size, CACHING_PAGESIZE); - cpu_addr = Common::AlignDown(cpu_addr, CACHING_PAGESIZE); - wanted_size = static_cast(cpu_addr_end - cpu_addr); - const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); +BufferId BufferCache

::CreateBuffer(DAddr device_addr, u32 wanted_size) { + DAddr device_addr_end = Common::AlignUp(device_addr + wanted_size, CACHING_PAGESIZE); + device_addr = Common::AlignDown(device_addr, CACHING_PAGESIZE); + wanted_size = static_cast(device_addr_end - device_addr); + const OverlapResult overlap = ResolveOverlaps(device_addr, wanted_size); const u32 size = static_cast(overlap.end - overlap.begin); - const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); + const BufferId new_buffer_id = slot_buffers.insert(runtime, overlap.begin, size); auto& new_buffer = slot_buffers[new_buffer_id]; const size_t size_bytes = new_buffer.SizeBytes(); runtime.ClearBuffer(new_buffer, 0, size_bytes, 0); @@ -1465,10 +1484,10 @@ void BufferCache

::ChangeRegister(BufferId buffer_id) { total_used_memory -= Common::AlignUp(size, 1024); lru_cache.Free(buffer.getLRUID()); } - const VAddr cpu_addr_begin = buffer.CpuAddr(); - const VAddr cpu_addr_end = cpu_addr_begin + size; - const u64 page_begin = cpu_addr_begin / CACHING_PAGESIZE; - const u64 page_end = Common::DivCeil(cpu_addr_end, CACHING_PAGESIZE); + const DAddr device_addr_begin = buffer.CpuAddr(); + const DAddr device_addr_end = device_addr_begin + size; + const u64 page_begin = device_addr_begin / CACHING_PAGESIZE; + const u64 page_end = Common::DivCeil(device_addr_end, CACHING_PAGESIZE); for (u64 page = page_begin; page != page_end; ++page) { if constexpr (insert) { page_table[page] = buffer_id; @@ -1486,15 +1505,15 @@ void BufferCache

::TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept { } template -bool BufferCache

::SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size) { +bool BufferCache

::SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size) { boost::container::small_vector copies; u64 total_size_bytes = 0; u64 largest_copy = 0; - VAddr buffer_start = buffer.CpuAddr(); - memory_tracker.ForEachUploadRange(cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) { + DAddr buffer_start = buffer.CpuAddr(); + memory_tracker.ForEachUploadRange(device_addr, size, [&](u64 device_addr_out, u64 range_size) { copies.push_back(BufferCopy{ .src_offset = total_size_bytes, - .dst_offset = cpu_addr_out - buffer_start, + .dst_offset = device_addr_out - buffer_start, .size = range_size, }); total_size_bytes += range_size; @@ -1526,14 +1545,14 @@ void BufferCache
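
SynchronizeBuffer asks the memory tracker for every range of the buffer that is still CPU-dirty and packs those ranges back to back into a single upload: src_offset is the running offset inside the staging allocation, dst_offset the range's offset inside the destination buffer. A standalone sketch of that packing, with BufferCopy reduced to the three fields used here:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using DAddr = std::uint64_t;

    struct BufferCopy {
        std::uint64_t src_offset; // offset inside the packed staging upload
        std::uint64_t dst_offset; // offset inside the destination buffer
        std::uint64_t size;
    };

    // Mirrors the copy-building loop in SynchronizeBuffer: dirty ranges arrive as
    // absolute device addresses and are packed tightly into the staging buffer.
    std::vector<BufferCopy> PackUploads(DAddr buffer_start,
                                        const std::vector<std::pair<DAddr, std::uint64_t>>& dirty) {
        std::vector<BufferCopy> copies;
        std::uint64_t total_size_bytes = 0;
        for (const auto& [addr, range_size] : dirty) {
            copies.push_back({total_size_bytes, addr - buffer_start, range_size});
            total_size_bytes += range_size;
        }
        return copies;
    }

    int main() {
        // A buffer at 0x10000 with two dirty ranges reported by the tracker.
        const auto copies = PackUploads(0x10000, {{0x10100, 0x80}, {0x10300, 0x40}});
        for (const auto& copy : copies) {
            std::printf("staging +0x%llx -> buffer +0x%llx (0x%llx bytes)\n",
                        static_cast<unsigned long long>(copy.src_offset),
                        static_cast<unsigned long long>(copy.dst_offset),
                        static_cast<unsigned long long>(copy.size));
        }
    }
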

::ImmediateUploadMemory([[maybe_unused]] Buffer& buffer, std::span immediate_buffer; for (const BufferCopy& copy : copies) { std::span upload_span; - const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset; - if (IsRangeGranular(cpu_addr, copy.size)) { - upload_span = std::span(cpu_memory.GetPointer(cpu_addr), copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset; + if (IsRangeGranular(device_addr, copy.size)) { + upload_span = std::span(device_memory.GetPointer(device_addr), copy.size); } else { if (immediate_buffer.empty()) { immediate_buffer = ImmediateBuffer(largest_copy); } - cpu_memory.ReadBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size); + device_memory.ReadBlockUnsafe(device_addr, immediate_buffer.data(), copy.size); upload_span = immediate_buffer.subspan(0, copy.size); } buffer.ImmediateUpload(copy.dst_offset, upload_span); @@ -1550,8 +1569,8 @@ void BufferCache

::MappedUploadMemory([[maybe_unused]] Buffer& buffer, const std::span staging_pointer = upload_staging.mapped_span; for (BufferCopy& copy : copies) { u8* const src_pointer = staging_pointer.data() + copy.src_offset; - const VAddr cpu_addr = buffer.CpuAddr() + copy.dst_offset; - cpu_memory.ReadBlockUnsafe(cpu_addr, src_pointer, copy.size); + const DAddr device_addr = buffer.CpuAddr() + copy.dst_offset; + device_memory.ReadBlockUnsafe(device_addr, src_pointer, copy.size); // Apply the staging offset copy.src_offset += upload_staging.offset; @@ -1562,14 +1581,14 @@ void BufferCache

::MappedUploadMemory([[maybe_unused]] Buffer& buffer, } template -bool BufferCache

::InlineMemory(VAddr dest_address, size_t copy_size, +bool BufferCache

::InlineMemory(DAddr dest_address, size_t copy_size, std::span inlined_buffer) { const bool is_dirty = IsRegionRegistered(dest_address, copy_size); if (!is_dirty) { return false; } - VAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE); - VAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE); + DAddr aligned_start = Common::AlignDown(dest_address, YUZU_PAGESIZE); + DAddr aligned_end = Common::AlignUp(dest_address + copy_size, YUZU_PAGESIZE); if (!IsRegionGpuModified(aligned_start, aligned_end - aligned_start)) { return false; } @@ -1580,7 +1599,7 @@ bool BufferCache

::InlineMemory(VAddr dest_address, size_t copy_size, } template -void BufferCache

::InlineMemoryImplementation(VAddr dest_address, size_t copy_size, +void BufferCache

::InlineMemoryImplementation(DAddr dest_address, size_t copy_size, std::span inlined_buffer) { const IntervalType subtract_interval{dest_address, dest_address + copy_size}; ClearDownload(subtract_interval); @@ -1612,14 +1631,14 @@ void BufferCache

::DownloadBufferMemory(Buffer& buffer) { } template -void BufferCache

::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 size) { +void BufferCache

::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64 size) { boost::container::small_vector copies; u64 total_size_bytes = 0; u64 largest_copy = 0; memory_tracker.ForEachDownloadRangeAndClear( - cpu_addr, size, [&](u64 cpu_addr_out, u64 range_size) { - const VAddr buffer_addr = buffer.CpuAddr(); - const auto add_download = [&](VAddr start, VAddr end) { + device_addr, size, [&](u64 device_addr_out, u64 range_size) { + const DAddr buffer_addr = buffer.CpuAddr(); + const auto add_download = [&](DAddr start, DAddr end) { const u64 new_offset = start - buffer_addr; const u64 new_size = end - start; copies.push_back(BufferCopy{ @@ -1634,8 +1653,8 @@ void BufferCache

::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si largest_copy = std::max(largest_copy, new_size); }; - const VAddr start_address = cpu_addr_out; - const VAddr end_address = start_address + range_size; + const DAddr start_address = device_addr_out; + const DAddr end_address = start_address + range_size; ForEachInRangeSet(common_ranges, start_address, range_size, add_download); const IntervalType subtract_interval{start_address, end_address}; ClearDownload(subtract_interval); @@ -1658,18 +1677,18 @@ void BufferCache

::DownloadBufferMemory(Buffer& buffer, VAddr cpu_addr, u64 si runtime.CopyBuffer(download_staging.buffer, buffer, copies_span, true); runtime.Finish(); for (const BufferCopy& copy : copies) { - const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset; + const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset; // Undo the modified offset const u64 dst_offset = copy.dst_offset - download_staging.offset; const u8* copy_mapped_memory = mapped_memory + dst_offset; - cpu_memory.WriteBlockUnsafe(copy_cpu_addr, copy_mapped_memory, copy.size); + device_memory.WriteBlockUnsafe(copy_device_addr, copy_mapped_memory, copy.size); } } else { const std::span immediate_buffer = ImmediateBuffer(largest_copy); for (const BufferCopy& copy : copies) { buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size)); - const VAddr copy_cpu_addr = buffer.CpuAddr() + copy.src_offset; - cpu_memory.WriteBlockUnsafe(copy_cpu_addr, immediate_buffer.data(), copy.size); + const DAddr copy_device_addr = buffer.CpuAddr() + copy.src_offset; + device_memory.WriteBlockUnsafe(copy_device_addr, immediate_buffer.data(), copy.size); } } } @@ -1758,20 +1777,20 @@ Binding BufferCache

::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index, const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment); const u32 aligned_size = static_cast(gpu_addr - aligned_gpu_addr) + size; - const std::optional aligned_cpu_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr); - if (!aligned_cpu_addr || size == 0) { + const std::optional aligned_device_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr); + if (!aligned_device_addr || size == 0) { LOG_WARNING(HW_GPU, "Failed to find storage buffer for cbuf index {}", cbuf_index); return NULL_BINDING; } - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); - ASSERT_MSG(cpu_addr, "Unaligned storage buffer address not found for cbuf index {}", + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + ASSERT_MSG(device_addr, "Unaligned storage buffer address not found for cbuf index {}", cbuf_index); // The end address used for size calculation does not need to be aligned - const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); + const DAddr cpu_end = Common::AlignUp(*device_addr + size, Core::Memory::YUZU_PAGESIZE); const Binding binding{ - .cpu_addr = *aligned_cpu_addr, - .size = is_written ? aligned_size : static_cast(cpu_end - *aligned_cpu_addr), + .device_addr = *aligned_device_addr, + .size = is_written ? aligned_size : static_cast(cpu_end - *aligned_device_addr), .buffer_id = BufferId{}, }; return binding; @@ -1780,15 +1799,15 @@ Binding BufferCache

::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index, template TextureBufferBinding BufferCache

::GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, PixelFormat format) { - const std::optional cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr); + const std::optional device_addr = gpu_memory->GpuToCpuAddress(gpu_addr); TextureBufferBinding binding; - if (!cpu_addr || size == 0) { - binding.cpu_addr = 0; + if (!device_addr || size == 0) { + binding.device_addr = 0; binding.size = 0; binding.buffer_id = NULL_BUFFER_ID; binding.format = PixelFormat::Invalid; } else { - binding.cpu_addr = *cpu_addr; + binding.device_addr = *device_addr; binding.size = size; binding.buffer_id = BufferId{}; binding.format = format; @@ -1797,14 +1816,14 @@ TextureBufferBinding BufferCache

::GetTextureBufferBinding(GPUVAddr gpu_addr, } template -std::span BufferCache

::ImmediateBufferWithData(VAddr cpu_addr, size_t size) { - u8* const base_pointer = cpu_memory.GetPointer(cpu_addr); - if (IsRangeGranular(cpu_addr, size) || - base_pointer + size == cpu_memory.GetPointer(cpu_addr + size)) { +std::span BufferCache

::ImmediateBufferWithData(DAddr device_addr, size_t size) { + u8* const base_pointer = device_memory.GetPointer(device_addr); + if (IsRangeGranular(device_addr, size) || + base_pointer + size == device_memory.GetPointer(device_addr + size)) { return std::span(base_pointer, size); } else { const std::span span = ImmediateBuffer(size); - cpu_memory.ReadBlockUnsafe(cpu_addr, span.data(), size); + device_memory.ReadBlockUnsafe(device_addr, span.data(), size); return span; } } @@ -1828,13 +1847,14 @@ bool BufferCache

::HasFastUniformBufferBound(size_t stage, u32 binding_index) template std::pair::Buffer*, u32> BufferCache

::GetDrawIndirectCount() { auto& buffer = slot_buffers[channel_state->count_buffer_binding.buffer_id]; - return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.cpu_addr)); + return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.device_addr)); } template std::pair::Buffer*, u32> BufferCache

::GetDrawIndirectBuffer() { auto& buffer = slot_buffers[channel_state->indirect_buffer_binding.buffer_id]; - return std::make_pair(&buffer, buffer.Offset(channel_state->indirect_buffer_binding.cpu_addr)); + return std::make_pair(&buffer, + buffer.Offset(channel_state->indirect_buffer_binding.device_addr)); } } // namespace VideoCommon diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h index d6d696d8c..4074003e4 100644 --- a/src/video_core/buffer_cache/buffer_cache_base.h +++ b/src/video_core/buffer_cache/buffer_cache_base.h @@ -32,7 +32,6 @@ #include "common/microprofile.h" #include "common/scope_exit.h" #include "common/settings.h" -#include "core/memory.h" #include "video_core/buffer_cache/buffer_base.h" #include "video_core/control/channel_state_cache.h" #include "video_core/delayed_destruction_ring.h" @@ -41,7 +40,6 @@ #include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" -#include "video_core/rasterizer_interface.h" #include "video_core/surface.h" #include "video_core/texture_cache/slot_vector.h" #include "video_core/texture_cache/types.h" @@ -94,7 +92,7 @@ static constexpr BufferId NULL_BUFFER_ID{0}; static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast(4_KiB); struct Binding { - VAddr cpu_addr{}; + DAddr device_addr{}; u32 size{}; BufferId buffer_id; }; @@ -104,7 +102,7 @@ struct TextureBufferBinding : Binding { }; static constexpr Binding NULL_BINDING{ - .cpu_addr = 0, + .device_addr = 0, .size = 0, .buffer_id = NULL_BUFFER_ID, }; @@ -204,10 +202,10 @@ class BufferCache : public VideoCommon::ChannelSetupCaches; - using IntervalInstance = boost::icl::interval_type_default; - using IntervalAllocator = boost::fast_pool_allocator; - using IntervalSet = boost::icl::interval_set; + using IntervalCompare = std::less; + using IntervalInstance = boost::icl::interval_type_default; + using IntervalAllocator = boost::fast_pool_allocator; + using IntervalSet = boost::icl::interval_set; using IntervalType = typename IntervalSet::interval_type; template @@ -230,32 +228,31 @@ class BufferCache : public VideoCommon::ChannelSetupCaches; using OverlapSection = boost::icl::inter_section; - using OverlapCounter = boost::icl::split_interval_map; + using OverlapCounter = boost::icl::split_interval_map; struct OverlapResult { boost::container::small_vector ids; - VAddr begin; - VAddr end; + DAddr begin; + DAddr end; bool has_stream_leap = false; }; public: - explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, Runtime& runtime_); + explicit BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, Runtime& runtime_); void TickFrame(); - void WriteMemory(VAddr cpu_addr, u64 size); + void WriteMemory(DAddr device_addr, u64 size); - void CachedWriteMemory(VAddr cpu_addr, u64 size); + void CachedWriteMemory(DAddr device_addr, u64 size); - bool OnCPUWrite(VAddr cpu_addr, u64 size); + bool OnCPUWrite(DAddr device_addr, u64 size); - void DownloadMemory(VAddr cpu_addr, u64 size); + void DownloadMemory(DAddr device_addr, u64 size); - std::optional GetFlushArea(VAddr cpu_addr, u64 size); + std::optional GetFlushArea(DAddr device_addr, u64 size); - bool InlineMemory(VAddr dest_address, size_t copy_size, std::span inlined_buffer); + bool InlineMemory(DAddr dest_address, size_t copy_size, std::span inlined_buffer); void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); @@ -300,7 +297,7 @@ public: 
                                                       ObtainBufferSynchronize sync_info,
                                                       ObtainBufferOperation post_op);
-    [[nodiscard]] std::pair ObtainCPUBuffer(VAddr gpu_addr, u32 size,
+    [[nodiscard]] std::pair ObtainCPUBuffer(DAddr gpu_addr, u32 size,
                                             ObtainBufferSynchronize sync_info,
                                             ObtainBufferOperation post_op);
 
     void FlushCachedWrites();
@@ -326,13 +323,13 @@ public:
     bool DMAClear(GPUVAddr src_address, u64 amount, u32 value);
 
     /// Return true when a CPU region is modified from the GPU
-    [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionGpuModified(DAddr addr, size_t size);
 
     /// Return true when a region is registered on the cache
-    [[nodiscard]] bool IsRegionRegistered(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionRegistered(DAddr addr, size_t size);
 
     /// Return true when a CPU region is modified from the CPU
-    [[nodiscard]] bool IsRegionCpuModified(VAddr addr, size_t size);
+    [[nodiscard]] bool IsRegionCpuModified(DAddr addr, size_t size);
 
     void SetDrawIndirect(
         const Tegra::Engines::DrawManager::IndirectParams* current_draw_indirect_) {
@@ -366,9 +363,9 @@ private:
     }
 
     template 
-    void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
-        const u64 page_end = Common::DivCeil(cpu_addr + size, CACHING_PAGESIZE);
-        for (u64 page = cpu_addr >> CACHING_PAGEBITS; page < page_end;) {
+    void ForEachBufferInRange(DAddr device_addr, u64 size, Func&& func) {
+        const u64 page_end = Common::DivCeil(device_addr + size, CACHING_PAGESIZE);
+        for (u64 page = device_addr >> CACHING_PAGEBITS; page < page_end;) {
             const BufferId buffer_id = page_table[page];
             if (!buffer_id) {
                 ++page;
@@ -377,15 +374,15 @@ private:
             Buffer& buffer = slot_buffers[buffer_id];
             func(buffer_id, buffer);
 
-            const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
+            const DAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
             page = Common::DivCeil(end_addr, CACHING_PAGESIZE);
         }
     }
 
     template 
-    void ForEachInRangeSet(IntervalSet& current_range, VAddr cpu_addr, u64 size, Func&& func) {
-        const VAddr start_address = cpu_addr;
-        const VAddr end_address = start_address + size;
+    void ForEachInRangeSet(IntervalSet& current_range, DAddr device_addr, u64 size, Func&& func) {
+        const DAddr start_address = device_addr;
+        const DAddr end_address = start_address + size;
         const IntervalType search_interval{start_address, end_address};
         auto it = current_range.lower_bound(search_interval);
         if (it == current_range.end()) {
@@ -393,8 +390,8 @@ private:
         }
         auto end_it = current_range.upper_bound(search_interval);
         for (; it != end_it; it++) {
-            VAddr inter_addr_end = it->upper();
-            VAddr inter_addr = it->lower();
+            DAddr inter_addr_end = it->upper();
+            DAddr inter_addr = it->lower();
             if (inter_addr_end > end_address) {
                 inter_addr_end = end_address;
             }
@@ -406,10 +403,10 @@ private:
     }
 
     template 
-    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
+    void ForEachInOverlapCounter(OverlapCounter& current_range, DAddr device_addr, u64 size,
                                  Func&& func) {
-        const VAddr start_address = cpu_addr;
-        const VAddr end_address = start_address + size;
+        const DAddr start_address = device_addr;
+        const DAddr end_address = start_address + size;
         const IntervalType search_interval{start_address, end_address};
         auto it = current_range.lower_bound(search_interval);
         if (it == current_range.end()) {
@@ -418,8 +415,8 @@ private:
         auto end_it = current_range.upper_bound(search_interval);
         for (; it != end_it; it++) {
             auto& inter = it->first;
-            VAddr inter_addr_end = inter.upper();
-            VAddr inter_addr = inter.lower();
+            DAddr inter_addr_end = inter.upper();
+            DAddr inter_addr = inter.lower();
             if (inter_addr_end > end_address) {
                 inter_addr_end = end_address;
             }
@@ -451,9 +448,9 @@ private:
         } while (any_removals);
     }
 
-    static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
-        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
-               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
+    static bool IsRangeGranular(DAddr device_addr, size_t size) {
+        return (device_addr & ~Core::Memory::YUZU_PAGEMASK) ==
+               ((device_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
     }
 
     void RunGarbageCollector();
@@ -508,15 +505,15 @@ private:
 
     void UpdateComputeTextureBuffers();
 
-    void MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 size);
+    void MarkWrittenBuffer(BufferId buffer_id, DAddr device_addr, u32 size);
 
-    [[nodiscard]] BufferId FindBuffer(VAddr cpu_addr, u32 size);
+    [[nodiscard]] BufferId FindBuffer(DAddr device_addr, u32 size);
 
-    [[nodiscard]] OverlapResult ResolveOverlaps(VAddr cpu_addr, u32 wanted_size);
+    [[nodiscard]] OverlapResult ResolveOverlaps(DAddr device_addr, u32 wanted_size);
 
     void JoinOverlap(BufferId new_buffer_id, BufferId overlap_id, bool accumulate_stream_score);
 
-    [[nodiscard]] BufferId CreateBuffer(VAddr cpu_addr, u32 wanted_size);
+    [[nodiscard]] BufferId CreateBuffer(DAddr device_addr, u32 wanted_size);
 
     void Register(BufferId buffer_id);
 
@@ -527,7 +524,7 @@ private:
 
     void TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept;
 
-    bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size);
+    bool SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 size);
 
     void UploadMemory(Buffer& buffer, u64 total_size_bytes, u64 largest_copy,
                       std::span copies);
@@ -539,7 +536,7 @@ private:
 
     void DownloadBufferMemory(Buffer& buffer_id);
 
-    void DownloadBufferMemory(Buffer& buffer_id, VAddr cpu_addr, u64 size);
+    void DownloadBufferMemory(Buffer& buffer_id, DAddr device_addr, u64 size);
 
     void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false);
 
@@ -549,7 +546,7 @@ private:
     [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
                                                                PixelFormat format);
 
-    [[nodiscard]] std::span ImmediateBufferWithData(VAddr cpu_addr, size_t size);
+    [[nodiscard]] std::span ImmediateBufferWithData(DAddr device_addr, size_t size);
 
     [[nodiscard]] std::span ImmediateBuffer(size_t wanted_capacity);
 
@@ -557,11 +554,10 @@ private:
 
     void ClearDownload(IntervalType subtract_interval);
 
-    void InlineMemoryImplementation(VAddr dest_address, size_t copy_size,
+    void InlineMemoryImplementation(DAddr dest_address, size_t copy_size,
                                     std::span inlined_buffer);
 
-    VideoCore::RasterizerInterface& rasterizer;
-    Core::Memory::Memory& cpu_memory;
+    Tegra::MaxwellDeviceMemoryManager& device_memory;
 
     SlotVector slot_buffers;
     DelayedDestructionRing delayed_destruction_ring;
@@ -598,7 +594,7 @@ private:
     u64 critical_memory = 0;
    BufferId inline_buffer_id;
 
-    std::array> CACHING_PAGEBITS)> page_table;
+    std::array> CACHING_PAGEBITS)> page_table;
     Common::ScratchBuffer tmp_buffer;
 };
 
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index 6c1c8287b..c95eed1f6 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -17,19 +17,19 @@
 
 namespace VideoCommon {
 
-template 
+template 
 class MemoryTrackerBase {
-    static constexpr size_t MAX_CPU_PAGE_BITS = 39;
+    static constexpr size_t MAX_CPU_PAGE_BITS = 34;
     static constexpr size_t HIGHER_PAGE_BITS = 22;
     static constexpr size_t HIGHER_PAGE_SIZE = 1ULL << HIGHER_PAGE_BITS;
     static constexpr size_t HIGHER_PAGE_MASK = HIGHER_PAGE_SIZE - 1ULL;
     static constexpr size_t NUM_HIGH_PAGES = 1ULL << (MAX_CPU_PAGE_BITS - HIGHER_PAGE_BITS);
     static constexpr size_t MANAGER_POOL_SIZE = 32;
     static constexpr size_t WORDS_STACK_NEEDED = HIGHER_PAGE_SIZE / BYTES_PER_WORD;
-    using Manager = WordManager;
+    using Manager = WordManager;
 
 public:
-    MemoryTrackerBase(RasterizerInterface& rasterizer_) : rasterizer{&rasterizer_} {}
+    MemoryTrackerBase(DeviceTracker& device_tracker_) : device_tracker{&device_tracker_} {}
     ~MemoryTrackerBase() = default;
 
     /// Returns the inclusive CPU modified range in a begin end pair
@@ -74,7 +74,7 @@ public:
         });
     }
 
-    /// Mark region as CPU modified, notifying the rasterizer about this change
+    /// Mark region as CPU modified, notifying the device_tracker about this change
     void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages(dirty_cpu_addr, query_size,
                      [](Manager* manager, u64 offset, size_t size) {
@@ -83,7 +83,7 @@ public:
         });
     }
 
-    /// Unmark region as CPU modified, notifying the rasterizer about this change
+    /// Unmark region as CPU modified, notifying the device_tracker about this change
     void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages(dirty_cpu_addr, query_size,
                      [](Manager* manager, u64 offset, size_t size) {
@@ -139,7 +139,7 @@ public:
         });
     }
 
-    /// Flushes cached CPU writes, and notify the rasterizer about the deltas
+    /// Flushes cached CPU writes, and notify the device_tracker about the deltas
     void FlushCachedWrites(VAddr query_cpu_addr, u64 query_size) noexcept {
         IteratePages(query_cpu_addr, query_size,
                      [](Manager* manager, [[maybe_unused]] u64 offset,
@@ -280,7 +280,7 @@ private:
         manager_pool.emplace_back();
         auto& last_pool = manager_pool.back();
         for (size_t i = 0; i < MANAGER_POOL_SIZE; i++) {
-            new (&last_pool[i]) Manager(0, *rasterizer, HIGHER_PAGE_SIZE);
+            new (&last_pool[i]) Manager(0, *device_tracker, HIGHER_PAGE_SIZE);
             free_managers.push_back(&last_pool[i]);
         }
         return on_return();
@@ -293,7 +293,7 @@ private:
 
     std::unordered_set cached_pages;
 
-    RasterizerInterface* rasterizer = nullptr;
+    DeviceTracker* device_tracker = nullptr;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a336bde41..56ab4f5f1 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -163,11 +163,11 @@ struct Words {
     WordsArray preflushable;
 };
 
-template 
+template 
 class WordManager {
 public:
-    explicit WordManager(VAddr cpu_addr_, RasterizerInterface& rasterizer_, u64 size_bytes)
-        : cpu_addr{cpu_addr_}, rasterizer{&rasterizer_}, words{size_bytes} {}
+    explicit WordManager(VAddr cpu_addr_, DeviceTracker& tracker_, u64 size_bytes)
+        : cpu_addr{cpu_addr_}, tracker{&tracker_}, words{size_bytes} {}
 
     explicit WordManager() = default;
 
@@ -279,7 +279,7 @@ public:
     }
 
     /**
-     * Loop over each page in the given range, turn off those bits and notify the rasterizer if
+     * Loop over each page in the given range, turn off those bits and notify the tracker if
      * needed. Call the given function on each turned off range.
     *
     * @param query_cpu_range Base CPU address to loop over
@@ -459,26 +459,26 @@ private:
     }
 
     /**
-     * Notify rasterizer about changes in the CPU tracking state of a word in the buffer
+     * Notify tracker about changes in the CPU tracking state of a word in the buffer
      *
-     * @param word_index Index to the word to notify to the rasterizer
+     * @param word_index Index to the word to notify to the tracker
     * @param current_bits Current state of the word
     * @param new_bits New state of the word
     *
-     * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages
+     * @tparam add_to_tracker True when the tracker should start tracking the new pages
     */
-    template 
+    template 
     void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) const {
-        u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits;
+        u64 changed_bits = (add_to_tracker ? current_bits : ~current_bits) & new_bits;
         VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
         IteratePages(changed_bits, [&](size_t offset, size_t size) {
-            rasterizer->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
-                                               size * BYTES_PER_PAGE, add_to_rasterizer ? 1 : -1);
+            tracker->UpdatePagesCachedCount(addr + offset * BYTES_PER_PAGE,
+                                            size * BYTES_PER_PAGE, add_to_tracker ? 1 : -1);
        });
     }
 
     VAddr cpu_addr = 0;
-    RasterizerInterface* rasterizer = nullptr;
+    DeviceTracker* tracker = nullptr;
     Words words;
 };
-- 
cgit v1.2.3
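
The word_manager.h and memory_tracker_base.h changes above swap the direct RasterizerInterface dependency for a template parameter (a "device tracker"); the only operation the tracking code actually calls on it is UpdatePagesCachedCount(addr, size, delta), with +1 when pages start being tracked and -1 when they stop (see WordManager::NotifyRasterizer). Below is a minimal sketch of a type satisfying that contract; it is illustrative only, not part of the patch, assumes a 4 KiB page granularity, and every name other than UpdatePagesCachedCount is invented for the example.

// Illustrative sketch, assuming only the UpdatePagesCachedCount() contract
// visible in the diff above. Names and page size are placeholders.
#include <cstdint>
#include <map>

namespace Example {

using DAddr = std::uint64_t; // device address, as introduced by this patch series

class CountingDeviceTracker {
public:
    // Called with delta = +1 when a page range starts being tracked and
    // delta = -1 when it stops being tracked.
    void UpdatePagesCachedCount(DAddr addr, std::uint64_t size, int delta) {
        constexpr std::uint64_t PAGE_SIZE = 4096; // assumed granularity
        if (size == 0) {
            return;
        }
        const DAddr first_page = addr / PAGE_SIZE;
        const DAddr last_page = (addr + size - 1) / PAGE_SIZE;
        for (DAddr page = first_page; page <= last_page; ++page) {
            page_counts[page] += delta; // per-page reference count
        }
    }

private:
    std::map<DAddr, int> page_counts;
};

} // namespace Example

// Hypothetical usage: the tracking templates are instantiated with the tracker
// type, e.g. MemoryTrackerBase<Example::CountingDeviceTracker>, so the page
// bookkeeping code never needs to include rasterizer headers.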