From 2df19ef0fd5a91ca87e2c2cf201166a40c9d44dc Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 7 May 2023 23:25:34 +0200
Subject: Buffer Cache: disable reactive flushing in it.

---
 src/video_core/buffer_cache/buffer_cache.h       | 11 ++---------
 src/video_core/renderer_vulkan/vk_rasterizer.cpp |  7 -------
 src/video_core/texture_cache/texture_cache.h     |  8 ++++++--
 3 files changed, 8 insertions(+), 18 deletions(-)

(limited to 'src/video_core')

diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 0b15944d6..6624919a4 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -203,11 +203,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
         const VAddr new_base_address = *cpu_dest_address + diff;
         const IntervalType add_interval{new_base_address, new_base_address + size};
         tmp_intervals.push_back(add_interval);
-        if (!Settings::values.use_reactive_flushing.GetValue() ||
-            memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
-            uncommitted_ranges.add(add_interval);
-            pending_ranges.add(add_interval);
-        }
+        uncommitted_ranges.add(add_interval);
+        pending_ranges.add(add_interval);
     };
     ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
     // This subtraction in this order is important for overlapping copies.
@@ -1234,10 +1231,6 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
 
     const IntervalType base_interval{cpu_addr, cpu_addr + size};
     common_ranges.add(base_interval);
-    if (Settings::values.use_reactive_flushing.GetValue() &&
-        !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
-        return;
-    }
     uncommitted_ranges.add(base_interval);
     pending_ranges.add(base_interval);
 }
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 2beb0efea..0bcc20544 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -510,13 +510,6 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64
             return *area;
         }
     }
-    {
-        std::scoped_lock lock{buffer_cache.mutex};
-        auto area = buffer_cache.GetFlushArea(addr, size);
-        if (area) {
-            return *area;
-        }
-    }
     VideoCore::RasterizerDownloadArea new_area{
         .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
         .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 29ac01eb4..d49f3a7a0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1323,7 +1323,6 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
             all_siblings.push_back(overlap_id);
         } else {
             bad_overlap_ids.push_back(overlap_id);
-            overlap.flags |= ImageFlagBits::BadOverlap;
         }
     };
     ForEachImageInRegion(cpu_addr, size_bytes, region_check);
@@ -1434,7 +1433,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         ImageBase& aliased = slot_images[aliased_id];
         aliased.overlapping_images.push_back(new_image_id);
         new_image.overlapping_images.push_back(aliased_id);
-        new_image.flags |= ImageFlagBits::BadOverlap;
+        if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
+            aliased.flags |= ImageFlagBits::BadOverlap;
+        }
+        if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
+            new_image.flags |= ImageFlagBits::BadOverlap;
+        }
     }
     RegisterImage(new_image_id);
     return new_image_id;
-- 
cgit v1.2.3
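
Note (not part of the patch above): after this change, BufferCache<P>::MarkWrittenBuffer and the DMACopy mirror lambda record every GPU-written range for a later download unconditionally, instead of gating that on Settings::values.use_reactive_flushing and memory_tracker.IsRegionPreflushable. The following is a minimal standalone sketch of that interval bookkeeping, assuming a boost::icl::interval_set behind the IntervalType/IntervalSet aliases visible in the hunks; RangeTrackerSketch and MarkWritten are illustrative names, not yuzu API.

    // Sketch only: mirrors the post-patch behaviour of MarkWrittenBuffer, where a
    // written CPU address range is always queued for flushing.
    #include <boost/icl/interval_set.hpp>

    #include <cstdint>

    using VAddr = std::uint64_t;
    using IntervalSet = boost::icl::interval_set<VAddr>;
    using IntervalType = IntervalSet::interval_type;

    struct RangeTrackerSketch {
        IntervalSet common_ranges;      // every range recorded as GPU-written
        IntervalSet uncommitted_ranges; // ranges queued for a future commit/flush
        IntervalSet pending_ranges;     // second queue, added alongside as in the diff

        void MarkWritten(VAddr cpu_addr, std::uint64_t size) {
            const IntervalType interval{cpu_addr, cpu_addr + size};
            common_ranges.add(interval);
            // Post-patch: no reactive-flushing check, the range is always queued.
            uncommitted_ranges.add(interval);
            pending_ranges.add(interval);
        }
    };

Before the patch, the last two add() calls were skipped whenever reactive flushing was enabled and the region was not marked preflushable.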