summaryrefslogtreecommitdiffstats
path: root/src/video_core/texture_cache
diff options
context:
space:
mode:
authorZach Hilman <DarkLordZach@users.noreply.github.com>2019-07-05 19:39:13 +0200
committerGitHub <noreply@github.com>2019-07-05 19:39:13 +0200
commit772c86a260eb446b0fe4232b0a50666511bef25c (patch)
tree013d92268c06454c93565c83eff2b79b56a00839 /src/video_core/texture_cache
parentMerge pull request #2669 from FearlessTobi/move-cpujit-setting (diff)
parenttexture_cache: Address Feedback (diff)
downloadyuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar.gz
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar.bz2
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar.lz
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar.xz
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.tar.zst
yuzu-772c86a260eb446b0fe4232b0a50666511bef25c.zip
Diffstat (limited to 'src/video_core/texture_cache')
-rw-r--r--src/video_core/texture_cache/copy_params.h36
-rw-r--r--src/video_core/texture_cache/surface_base.cpp300
-rw-r--r--src/video_core/texture_cache/surface_base.h317
-rw-r--r--src/video_core/texture_cache/surface_params.cpp334
-rw-r--r--src/video_core/texture_cache/surface_params.h286
-rw-r--r--src/video_core/texture_cache/surface_view.cpp23
-rw-r--r--src/video_core/texture_cache/surface_view.h67
-rw-r--r--src/video_core/texture_cache/texture_cache.h814
8 files changed, 2177 insertions, 0 deletions
diff --git a/src/video_core/texture_cache/copy_params.h b/src/video_core/texture_cache/copy_params.h
new file mode 100644
index 000000000..9c21a0649
--- /dev/null
+++ b/src/video_core/texture_cache/copy_params.h
@@ -0,0 +1,36 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCommon {
+
/// Describes a single image copy: source/destination offsets and mipmap
/// levels, plus the extent (width/height/depth) to transfer.
struct CopyParams {
    constexpr CopyParams(u32 source_x, u32 source_y, u32 source_z, u32 dest_x, u32 dest_y,
                         u32 dest_z, u32 source_level, u32 dest_level, u32 width, u32 height,
                         u32 depth)
        : source_x{source_x}, source_y{source_y}, source_z{source_z}, dest_x{dest_x},
          dest_y{dest_y}, dest_z{dest_z}, source_level{source_level},
          dest_level{dest_level}, width{width}, height{height}, depth{depth} {}

    /// Convenience constructor for a whole-level copy: all offsets are zero
    /// and source/destination share the same mipmap level.
    constexpr CopyParams(u32 width, u32 height, u32 depth, u32 level)
        : source_x{}, source_y{}, source_z{}, dest_x{}, dest_y{}, dest_z{}, source_level{level},
          dest_level{level}, width{width}, height{height}, depth{depth} {}

    u32 source_x;     ///< X texel offset into the source image
    u32 source_y;     ///< Y texel offset into the source image
    u32 source_z;     ///< Z/layer offset into the source image
    u32 dest_x;       ///< X texel offset into the destination image
    u32 dest_y;       ///< Y texel offset into the destination image
    u32 dest_z;       ///< Z/layer offset into the destination image
    u32 source_level; ///< Source mipmap level
    u32 dest_level;   ///< Destination mipmap level
    u32 width;        ///< Copy width in texels
    u32 height;       ///< Copy height in texels
    u32 depth;        ///< Copy depth (slices/layers)
};
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
new file mode 100644
index 000000000..7a0fdb19b
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -0,0 +1,300 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "video_core/memory_manager.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/textures/convert.h"
+
+namespace VideoCommon {
+
+MICROPROFILE_DEFINE(GPU_Load_Texture, "GPU", "Texture Load", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192, 128));
+
+using Tegra::Texture::ConvertFromGuestToHost;
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceCompression;
+
// Defaulted out of line; the constructor/destructor are declared (not
// defaulted) in the header.
StagingCache::StagingCache() = default;

StagingCache::~StagingCache() = default;
+
// Precomputes per-level guest sizes/offsets and the total guest footprint of
// the surface.
SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
    : params{params}, mipmap_sizes(params.num_levels),
      mipmap_offsets(params.num_levels), gpu_addr{gpu_addr}, host_memory_size{
                                                                 params.GetHostSizeInBytes()} {
    // Accumulate each mipmap's guest size into per-level offsets within a layer.
    std::size_t offset = 0;
    for (u32 level = 0; level < params.num_levels; ++level) {
        const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
        mipmap_sizes[level] = mipmap_size;
        mipmap_offsets[level] = offset;
        offset += mipmap_size;
    }
    layer_size = offset;
    if (params.is_layered) {
        if (params.is_tiled) {
            // Tiled layered surfaces align each layer to the block dimensions.
            layer_size =
                SurfaceParams::AlignLayered(layer_size, params.block_height, params.block_depth);
        }
        // For layered surfaces, 'depth' is the number of layers.
        guest_memory_size = layer_size * params.depth;
    } else {
        guest_memory_size = layer_size;
    }
}
+
+MatchTopologyResult SurfaceBaseImpl::MatchesTopology(const SurfaceParams& rhs) const {
+ const u32 src_bpp{params.GetBytesPerPixel()};
+ const u32 dst_bpp{rhs.GetBytesPerPixel()};
+ const bool ib1 = params.IsBuffer();
+ const bool ib2 = rhs.IsBuffer();
+ if (std::tie(src_bpp, params.is_tiled, ib1) == std::tie(dst_bpp, rhs.is_tiled, ib2)) {
+ const bool cb1 = params.IsCompressed();
+ const bool cb2 = rhs.IsCompressed();
+ if (cb1 == cb2) {
+ return MatchTopologyResult::FullMatch;
+ }
+ return MatchTopologyResult::CompressUnmatch;
+ }
+ return MatchTopologyResult::None;
+}
+
// Compares the dimensional layout of this surface against 'rhs', from the
// cheapest check (buffers) to the most involved (tiled with format conversion).
MatchStructureResult SurfaceBaseImpl::MatchesStructure(const SurfaceParams& rhs) const {
    // Buffer surface Check: buffers only need the same row size in bytes.
    if (params.IsBuffer()) {
        const std::size_t wd1 = params.width * params.GetBytesPerPixel();
        const std::size_t wd2 = rhs.width * rhs.GetBytesPerPixel();
        if (wd1 == wd2) {
            return MatchStructureResult::FullMatch;
        }
        return MatchStructureResult::None;
    }

    // Linear Surface check: pitch-linear surfaces must agree on extent and pitch.
    if (!params.is_tiled) {
        if (std::tie(params.width, params.height, params.pitch) ==
            std::tie(rhs.width, rhs.height, rhs.pitch)) {
            return MatchStructureResult::FullMatch;
        }
        return MatchStructureResult::None;
    }

    // Tiled Surface check: block layout and level count must match exactly.
    if (std::tie(params.depth, params.block_width, params.block_height, params.block_depth,
                 params.tile_width_spacing, params.num_levels) ==
        std::tie(rhs.depth, rhs.block_width, rhs.block_height, rhs.block_depth,
                 rhs.tile_width_spacing, rhs.num_levels)) {
        if (std::tie(params.width, params.height) == std::tie(rhs.width, rhs.height)) {
            return MatchStructureResult::FullMatch;
        }
        // Extents differ: re-check after converting rhs's block-aligned extent
        // into this surface's pixel format (e.g. compressed vs. decompressed).
        const u32 ws = SurfaceParams::ConvertWidth(rhs.GetBlockAlignedWidth(), params.pixel_format,
                                                   rhs.pixel_format);
        const u32 hs =
            SurfaceParams::ConvertHeight(rhs.height, params.pixel_format, rhs.pixel_format);
        const u32 w1 = params.GetBlockAlignedWidth();
        if (std::tie(w1, params.height) == std::tie(ws, hs)) {
            return MatchStructureResult::SemiMatch;
        }
    }
    return MatchStructureResult::None;
}
+
+std::optional<std::pair<u32, u32>> SurfaceBaseImpl::GetLayerMipmap(
+ const GPUVAddr candidate_gpu_addr) const {
+ if (gpu_addr == candidate_gpu_addr) {
+ return {{0, 0}};
+ }
+ if (candidate_gpu_addr < gpu_addr) {
+ return {};
+ }
+ const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
+ const auto layer{static_cast<u32>(relative_address / layer_size)};
+ const GPUVAddr mipmap_address = relative_address - layer_size * layer;
+ const auto mipmap_it =
+ Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
+ if (mipmap_it == mipmap_offsets.end()) {
+ return {};
+ }
+ const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
+ return std::make_pair(layer, level);
+}
+
// Generates one copy per (layer, level) pair, clamped to the extent common to
// both surfaces.
std::vector<CopyParams> SurfaceBaseImpl::BreakDownLayered(const SurfaceParams& in_params) const {
    const u32 layers{params.depth};
    const u32 mipmaps{params.num_levels};
    std::vector<CopyParams> result;
    result.reserve(static_cast<std::size_t>(layers) * static_cast<std::size_t>(mipmaps));

    for (u32 layer = 0; layer < layers; layer++) {
        for (u32 level = 0; level < mipmaps; level++) {
            const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
            const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
            // NOTE(review): 'layer' is forwarded as CopyParams' 'depth'
            // argument (the z offsets stay zero) — confirm consumers interpret
            // the depth field as the target layer for layered breakdowns.
            result.emplace_back(width, height, layer, level);
        }
    }
    return result;
}
+
+std::vector<CopyParams> SurfaceBaseImpl::BreakDownNonLayered(const SurfaceParams& in_params) const {
+ const u32 mipmaps{params.num_levels};
+ std::vector<CopyParams> result;
+ result.reserve(mipmaps);
+
+ for (u32 level = 0; level < mipmaps; level++) {
+ const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
+ const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
+ const u32 depth{std::min(params.GetMipDepth(level), in_params.GetMipDepth(level))};
+ result.emplace_back(width, height, depth, level);
+ }
+ return result;
+}
+
// Swizzles (LinearToMorton) or deswizzles (MortonToLinear) one mipmap level
// between guest memory ('memory') and the linear staging 'buffer'.
void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params,
                                  u8* buffer, u32 level) {
    const u32 width{params.GetMipWidth(level)};
    const u32 height{params.GetMipHeight(level)};
    const u32 block_height{params.GetMipBlockHeight(level)};
    const u32 block_depth{params.GetMipBlockDepth(level)};

    std::size_t guest_offset{mipmap_offsets[level]};
    if (params.is_layered) {
        // Layered surfaces swizzle one layer at a time: guest layers are
        // 'layer_size' bytes apart, host layers are packed per level.
        std::size_t host_offset{0};
        const std::size_t guest_stride = layer_size;
        const std::size_t host_stride = params.GetHostLayerSize(level);
        for (u32 layer = 0; layer < params.depth; ++layer) {
            MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth, 1,
                          params.tile_width_spacing, buffer + host_offset, memory + guest_offset);
            guest_offset += guest_stride;
            host_offset += host_stride;
        }
    } else {
        // Non-layered surfaces swizzle the whole level, including its depth.
        MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth,
                      params.GetMipDepth(level), params.tile_width_spacing, buffer,
                      memory + guest_offset);
    }
}
+
// Reads the surface from guest memory into staging buffer 0, deswizzling
// tiled data and converting formats the host cannot consume directly.
void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
                                 StagingCache& staging_cache) {
    MICROPROFILE_SCOPE(GPU_Load_Texture);
    auto& staging_buffer = staging_cache.GetBuffer(0);
    u8* host_ptr;
    is_continuous = memory_manager.IsBlockContinuous(gpu_addr, guest_memory_size);

    // Handle continuity
    if (is_continuous) {
        // Use physical memory directly
        host_ptr = memory_manager.GetPointer(gpu_addr);
        if (!host_ptr) {
            return;
        }
    } else {
        // Use an extra temporary buffer, filled from the scattered guest pages
        auto& tmp_buffer = staging_cache.GetBuffer(1);
        tmp_buffer.resize(guest_memory_size);
        host_ptr = tmp_buffer.data();
        memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
    }

    if (params.is_tiled) {
        // Tiled (block linear) surfaces are deswizzled level by level.
        ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
                   params.block_width, static_cast<u32>(params.target));
        for (u32 level = 0; level < params.num_levels; ++level) {
            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
            SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
                        staging_buffer.data() + host_offset, level);
        }
    } else {
        ASSERT_MSG(params.num_levels == 1, "Linear mipmap loading is not implemented");
        const u32 bpp{params.GetBytesPerPixel()};
        const u32 block_width{params.GetDefaultBlockWidth()};
        const u32 block_height{params.GetDefaultBlockHeight()};
        const u32 width{(params.width + block_width - 1) / block_width};
        const u32 height{(params.height + block_height - 1) / block_height};
        const u32 copy_size{width * bpp};
        if (params.pitch == copy_size) {
            // Rows are tightly packed: one copy moves the whole image.
            std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
        } else {
            // Copy row by row, skipping the per-row pitch padding.
            const u8* start{host_ptr};
            u8* write_to{staging_buffer.data()};
            for (u32 h = height; h > 0; --h) {
                std::memcpy(write_to, start, copy_size);
                start += params.pitch;
                write_to += copy_size;
            }
        }
    }

    // Formats the host renders natively need no further conversion.
    auto compression_type = params.GetCompressionType();
    if (compression_type == SurfaceCompression::None ||
        compression_type == SurfaceCompression::Compressed)
        return;

    // Convert the remaining formats in place, walking levels in reverse —
    // presumably so expanding conversions do not overwrite levels that have
    // not been converted yet; confirm against ConvertFromGuestToHost.
    for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
        const u32 level = level_up - 1;
        const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
        const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
                                                ? in_host_offset
                                                : params.GetConvertedMipmapOffset(level);
        u8* in_buffer = staging_buffer.data() + in_host_offset;
        u8* out_buffer = staging_buffer.data() + out_host_offset;
        ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
                               params.GetMipWidth(level), params.GetMipHeight(level),
                               params.GetMipDepth(level), true, true);
    }
}
+
// Writes staging buffer 0 back to guest memory, re-swizzling tiled surfaces.
void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
                                  StagingCache& staging_cache) {
    MICROPROFILE_SCOPE(GPU_Flush_Texture);
    auto& staging_buffer = staging_cache.GetBuffer(0);
    u8* host_ptr;

    // Handle continuity
    if (is_continuous) {
        // Use physical memory directly
        host_ptr = memory_manager.GetPointer(gpu_addr);
        if (!host_ptr) {
            return;
        }
    } else {
        // Use an extra temporary buffer; written back to guest memory at the end
        auto& tmp_buffer = staging_cache.GetBuffer(1);
        tmp_buffer.resize(guest_memory_size);
        host_ptr = tmp_buffer.data();
    }

    if (params.is_tiled) {
        ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
        // Re-swizzle every level from the linear staging data into guest layout.
        for (u32 level = 0; level < params.num_levels; ++level) {
            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
            SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
                        staging_buffer.data() + host_offset, level);
        }
    } else {
        // Linear flush only supports single-level 2D surfaces.
        ASSERT(params.target == SurfaceTarget::Texture2D);
        ASSERT(params.num_levels == 1);

        const u32 bpp{params.GetBytesPerPixel()};
        const u32 copy_size{params.width * bpp};
        if (params.pitch == copy_size) {
            // Tightly packed rows: single bulk copy.
            std::memcpy(host_ptr, staging_buffer.data(), guest_memory_size);
        } else {
            // Copy row by row, honoring the guest pitch.
            u8* start{host_ptr};
            const u8* read_to{staging_buffer.data()};
            for (u32 h = params.height; h > 0; --h) {
                std::memcpy(start, read_to, copy_size);
                start += params.pitch;
                read_to += copy_size;
            }
        }
    }
    if (!is_continuous) {
        // Scatter the temporary buffer back into the guest's pages.
        memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
    }
}
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
new file mode 100644
index 000000000..8ba386a8a
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.h
@@ -0,0 +1,317 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
#include <algorithm>
#include <optional>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
+
+#include "common/assert.h"
+#include "common/binary_find.h"
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+#include "video_core/morton.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon {
+
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceTarget;
+
/// Result of SurfaceBaseImpl::MatchesStructure.
enum class MatchStructureResult : u32 {
    FullMatch = 0, ///< Extents and layout match exactly
    SemiMatch = 1, ///< Extents match after format-aware width/height conversion
    None = 2,      ///< Layouts are incompatible
};
+
/// Result of SurfaceBaseImpl::MatchesTopology.
enum class MatchTopologyResult : u32 {
    FullMatch = 0,       ///< Same bpp, tiling and buffer-ness, same compression
    CompressUnmatch = 1, ///< Same topology but compression state differs
    None = 2,            ///< Topologies are incompatible
};
+
/// Holds reusable CPU-side buffers for surface upload/download. Buffer 0 is
/// the main staging buffer; buffer 1 is scratch space for non-contiguous guest
/// memory (see SurfaceBaseImpl::LoadBuffer/FlushBuffer).
class StagingCache {
public:
    explicit StagingCache();
    ~StagingCache();

    std::vector<u8>& GetBuffer(std::size_t index) {
        return staging_buffer[index];
    }

    const std::vector<u8>& GetBuffer(std::size_t index) const {
        return staging_buffer[index];
    }

    /// Sets the number of available staging buffers.
    void SetSize(std::size_t size) {
        staging_buffer.resize(size);
    }

private:
    std::vector<std::vector<u8>> staging_buffer;
};
+
/// Backend-independent state of a cached surface: addresses, guest/host
/// sizes, per-level layout and the match/breakdown helpers built on them.
class SurfaceBaseImpl {
public:
    /// Copies the surface's guest memory into the staging buffer, deswizzling
    /// and format-converting as needed.
    void LoadBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);

    /// Writes the staging buffer back to guest memory, re-swizzling as needed.
    void FlushBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);

    GPUVAddr GetGpuAddr() const {
        return gpu_addr;
    }

    /// Returns true if the cache range [start, end) intersects this surface.
    bool Overlaps(const CacheAddr start, const CacheAddr end) const {
        return (cache_addr < end) && (cache_addr_end > start);
    }

    /// Returns true if [other_start, other_end] lies fully inside this surface.
    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
        const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
        return (gpu_addr <= other_start && other_end <= gpu_addr_end);
    }

    // Use only when recycling a surface
    void SetGpuAddr(const GPUVAddr new_addr) {
        gpu_addr = new_addr;
    }

    VAddr GetCpuAddr() const {
        return cpu_addr;
    }

    void SetCpuAddr(const VAddr new_addr) {
        cpu_addr = new_addr;
    }

    CacheAddr GetCacheAddr() const {
        return cache_addr;
    }

    CacheAddr GetCacheAddrEnd() const {
        return cache_addr_end;
    }

    /// Sets the cache address and derives the end address from the guest size.
    void SetCacheAddr(const CacheAddr new_addr) {
        cache_addr = new_addr;
        cache_addr_end = new_addr + guest_memory_size;
    }

    const SurfaceParams& GetSurfaceParams() const {
        return params;
    }

    /// Size of the surface in guest memory.
    std::size_t GetSizeInBytes() const {
        return guest_memory_size;
    }

    /// Size of the surface in host (staging) memory.
    std::size_t GetHostSizeInBytes() const {
        return host_memory_size;
    }

    /// Guest size of a single mipmap level.
    std::size_t GetMipmapSize(const u32 level) const {
        return mipmap_sizes[level];
    }

    void MarkAsContinuous(const bool is_continuous) {
        this->is_continuous = is_continuous;
    }

    bool IsContinuous() const {
        return is_continuous;
    }

    bool IsLinear() const {
        return !params.is_tiled;
    }

    bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
        return params.pixel_format == pixel_format;
    }

    VideoCore::Surface::PixelFormat GetFormat() const {
        return params.pixel_format;
    }

    bool MatchTarget(VideoCore::Surface::SurfaceTarget target) const {
        return params.target == target;
    }

    /// Checks bytes-per-pixel / tiling / buffer compatibility against rhs.
    MatchTopologyResult MatchesTopology(const SurfaceParams& rhs) const;

    /// Checks dimensional layout compatibility against rhs.
    MatchStructureResult MatchesStructure(const SurfaceParams& rhs) const;

    /// Returns true if rhs could be a single-level 2D texture starting at the
    /// same address as this surface.
    bool MatchesSubTexture(const SurfaceParams& rhs, const GPUVAddr other_gpu_addr) const {
        return std::tie(gpu_addr, params.target, params.num_levels) ==
                   std::tie(other_gpu_addr, rhs.target, rhs.num_levels) &&
               params.target == SurfaceTarget::Texture2D && params.num_levels == 1;
    }

    /// Maps a GPU address within the surface to its (layer, level) pair, or
    /// std::nullopt if the address does not start a level.
    std::optional<std::pair<u32, u32>> GetLayerMipmap(const GPUVAddr candidate_gpu_addr) const;

    /// Produces the per-layer/per-level copies needed to transfer this
    /// surface's data into a surface described by in_params.
    std::vector<CopyParams> BreakDown(const SurfaceParams& in_params) const {
        return params.is_layered ? BreakDownLayered(in_params) : BreakDownNonLayered(in_params);
    }

protected:
    explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
    ~SurfaceBaseImpl() = default;

    virtual void DecorateSurfaceName() = 0;

    const SurfaceParams params;
    std::size_t layer_size;        // Guest bytes per layer (block-aligned when tiled)
    std::size_t guest_memory_size; // Total guest bytes (layer_size * depth when layered)
    const std::size_t host_memory_size;
    GPUVAddr gpu_addr{};
    CacheAddr cache_addr{};
    CacheAddr cache_addr_end{};
    VAddr cpu_addr{};
    bool is_continuous{};

    std::vector<std::size_t> mipmap_sizes;   // Guest size of each level
    std::vector<std::size_t> mipmap_offsets; // Guest offset of each level within a layer

private:
    void SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params, u8* buffer,
                     u32 level);

    std::vector<CopyParams> BreakDownLayered(const SurfaceParams& in_params) const;

    std::vector<CopyParams> BreakDownNonLayered(const SurfaceParams& in_params) const;
};
+
/// Backend-facing surface, parameterized on the backend's view type (TView).
/// Adds modification/registration bookkeeping and a cache of views.
template <typename TView>
class SurfaceBase : public SurfaceBaseImpl {
public:
    /// Uploads the (already loaded) staging buffer to the host GPU object.
    virtual void UploadTexture(const std::vector<u8>& staging_buffer) = 0;

    /// Downloads the host GPU object into the staging buffer.
    virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0;

    /// Marks the surface (not) modified; render targets always stay modified.
    void MarkAsModified(const bool is_modified_, const u64 tick) {
        is_modified = is_modified_ || is_target;
        modification_tick = tick;
    }

    void MarkAsRenderTarget(const bool is_target) {
        this->is_target = is_target;
    }

    void MarkAsPicked(const bool is_picked) {
        this->is_picked = is_picked;
    }

    bool IsModified() const {
        return is_modified;
    }

    bool IsProtected() const {
        // Only 3D Slices are to be protected
        return is_target && params.block_depth > 0;
    }

    bool IsRenderTarget() const {
        return is_target;
    }

    bool IsRegistered() const {
        return is_registered;
    }

    bool IsPicked() const {
        return is_picked;
    }

    void MarkAsRegistered(bool is_reg) {
        is_registered = is_reg;
    }

    u64 GetModificationTick() const {
        return modification_tick;
    }

    /// Returns a view spanning all layers and levels of this surface.
    TView EmplaceOverview(const SurfaceParams& overview_params) {
        // Viewing a layered surface as non-layered exposes a single layer.
        const u32 num_layers{(params.is_layered && !overview_params.is_layered) ? 1 : params.depth};
        return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels));
    }

    /// Builds a view when the candidate range does not match a whole mipmap:
    /// either a level-0 multi-layer view or a single-layer multi-level view.
    std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params,
                                              const GPUVAddr view_addr,
                                              const std::size_t candidate_size, const u32 mipmap,
                                              const u32 layer) {
        const auto layer_mipmap{GetLayerMipmap(view_addr + candidate_size)};
        if (!layer_mipmap) {
            return {};
        }
        const u32 end_layer{layer_mipmap->first};
        const u32 end_mipmap{layer_mipmap->second};
        if (layer != end_layer) {
            // Spans multiple layers: only representable as a level-0 layer range.
            if (mipmap == 0 && end_mipmap == 0) {
                return GetView(ViewParams(view_params.target, layer, end_layer - layer + 1, 0, 1));
            }
            return {};
        } else {
            return GetView(
                ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap + 1));
        }
    }

    /// Tries to build a view matching [view_addr, view_addr + candidate_size);
    /// returns std::nullopt when no coherent view exists.
    std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr,
                                     const std::size_t candidate_size) {
        // 3D targets and single-level non-layered surfaces cannot be sub-viewed.
        if (params.target == SurfaceTarget::Texture3D ||
            (params.num_levels == 1 && !params.is_layered) ||
            view_params.target == SurfaceTarget::Texture3D) {
            return {};
        }
        const auto layer_mipmap{GetLayerMipmap(view_addr)};
        if (!layer_mipmap) {
            return {};
        }
        const u32 layer{layer_mipmap->first};
        const u32 mipmap{layer_mipmap->second};
        if (GetMipmapSize(mipmap) != candidate_size) {
            return EmplaceIrregularView(view_params, view_addr, candidate_size, mipmap, layer);
        }
        return GetView(ViewParams(view_params.target, layer, 1, mipmap, 1));
    }

    TView GetMainView() const {
        return main_view;
    }

protected:
    explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
        : SurfaceBaseImpl(gpu_addr, params) {}

    ~SurfaceBase() = default;

    virtual TView CreateView(const ViewParams& view_key) = 0;

    TView main_view;
    std::unordered_map<ViewParams, TView> views;

private:
    /// Returns the cached view for 'key', creating it on first use.
    TView GetView(const ViewParams& key) {
        const auto [entry, is_cache_miss] = views.try_emplace(key);
        auto& view{entry->second};
        if (is_cache_miss) {
            view = CreateView(key);
        }
        return view;
    }

    bool is_modified{};
    bool is_target{};
    bool is_registered{};
    bool is_picked{};
    u64 modification_tick{};
};
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
new file mode 100644
index 000000000..9c56e2b4f
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -0,0 +1,334 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
#include <algorithm>
#include <climits>
#include <map>
#include <string>
#include <tuple>

#include "common/alignment.h"
#include "common/bit_util.h"
#include "core/core.h"
#include "video_core/engines/shader_bytecode.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::ComponentTypeFromDepthFormat;
+using VideoCore::Surface::ComponentTypeFromRenderTarget;
+using VideoCore::Surface::ComponentTypeFromTexture;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::PixelFormatFromDepthFormat;
+using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
+using VideoCore::Surface::PixelFormatFromTextureFormat;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceTargetFromTextureType;
+using VideoCore::Surface::SurfaceType;
+
+SurfaceTarget TextureType2SurfaceTarget(Tegra::Shader::TextureType type, bool is_array) {
+ switch (type) {
+ case Tegra::Shader::TextureType::Texture1D: {
+ if (is_array)
+ return SurfaceTarget::Texture1DArray;
+ else
+ return SurfaceTarget::Texture1D;
+ }
+ case Tegra::Shader::TextureType::Texture2D: {
+ if (is_array)
+ return SurfaceTarget::Texture2DArray;
+ else
+ return SurfaceTarget::Texture2D;
+ }
+ case Tegra::Shader::TextureType::Texture3D: {
+ ASSERT(!is_array);
+ return SurfaceTarget::Texture3D;
+ }
+ case Tegra::Shader::TextureType::TextureCube: {
+ if (is_array)
+ return SurfaceTarget::TextureCubeArray;
+ else
+ return SurfaceTarget::TextureCubemap;
+ }
+ default: {
+ UNREACHABLE();
+ return SurfaceTarget::Texture2D;
+ }
+ }
+}
+
namespace {
/// Returns a mip extent measured in blocks: the raw size for uncompressed
/// formats, otherwise the size divided by the block ('tile') dimension,
/// rounded up and clamped to at least one block.
constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) {
    if (uncompressed) {
        return mip_size;
    }
    const u32 blocks = (mip_size + tile - 1) / tile;
    return std::max(1U, blocks);
}
} // Anonymous namespace
+
+SurfaceParams SurfaceParams::CreateForTexture(Core::System& system,
+ const Tegra::Texture::FullTextureInfo& config,
+ const VideoCommon::Shader::Sampler& entry) {
+ SurfaceParams params;
+ params.is_tiled = config.tic.IsTiled();
+ params.srgb_conversion = config.tic.IsSrgbConversionEnabled();
+ params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
+ params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
+ params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
+ params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
+ params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(),
+ params.srgb_conversion);
+ params.type = GetFormatType(params.pixel_format);
+ if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+ switch (params.pixel_format) {
+ case PixelFormat::R16U:
+ case PixelFormat::R16F: {
+ params.pixel_format = PixelFormat::Z16;
+ break;
+ }
+ case PixelFormat::R32F: {
+ params.pixel_format = PixelFormat::Z32F;
+ break;
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Unimplemented shadow convert format: {}",
+ static_cast<u32>(params.pixel_format));
+ }
+ }
+ params.type = GetFormatType(params.pixel_format);
+ }
+ params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
+ params.type = GetFormatType(params.pixel_format);
+ // TODO: on 1DBuffer we should use the tic info.
+ if (!config.tic.IsBuffer()) {
+ params.target = TextureType2SurfaceTarget(entry.GetType(), entry.IsArray());
+ params.width = config.tic.Width();
+ params.height = config.tic.Height();
+ params.depth = config.tic.Depth();
+ params.pitch = params.is_tiled ? 0 : config.tic.Pitch();
+ if (params.target == SurfaceTarget::TextureCubemap ||
+ params.target == SurfaceTarget::TextureCubeArray) {
+ params.depth *= 6;
+ }
+ params.num_levels = config.tic.max_mip_level + 1;
+ params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap());
+ params.is_layered = params.IsLayered();
+ } else {
+ params.target = SurfaceTarget::TextureBuffer;
+ params.width = config.tic.Width();
+ params.pitch = params.width * params.GetBytesPerPixel();
+ params.height = 1;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ }
+ return params;
+}
+
// Builds surface parameters for the bound zeta (depth/stencil) buffer.
SurfaceParams SurfaceParams::CreateForDepthBuffer(
    Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
    u32 block_width, u32 block_height, u32 block_depth,
    Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
    SurfaceParams params;
    params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
    params.srgb_conversion = false;
    // Clamp block dimensions to 5, the largest value handled here (matches
    // CreateForFermiCopySurface).
    params.block_width = std::min(block_width, 5U);
    params.block_height = std::min(block_height, 5U);
    params.block_depth = std::min(block_depth, 5U);
    params.tile_width_spacing = 1;
    params.pixel_format = PixelFormatFromDepthFormat(format);
    params.component_type = ComponentTypeFromDepthFormat(format);
    params.type = GetFormatType(params.pixel_format);
    params.width = zeta_width;
    params.height = zeta_height;
    // Depth buffers are always single-level 2D surfaces.
    params.target = SurfaceTarget::Texture2D;
    params.depth = 1;
    params.pitch = 0;
    params.num_levels = 1;
    params.emulated_levels = 1;
    params.is_layered = false;
    return params;
}
+
// Builds surface parameters from render target 'index' of the active
// Maxwell3D register state.
SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
    const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
    SurfaceParams params;
    params.is_tiled =
        config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
    params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
                             config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
    params.block_width = config.memory_layout.block_width;
    params.block_height = config.memory_layout.block_height;
    params.block_depth = config.memory_layout.block_depth;
    params.tile_width_spacing = 1;
    params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
    params.component_type = ComponentTypeFromRenderTarget(config.format);
    params.type = GetFormatType(params.pixel_format);
    if (params.is_tiled) {
        params.pitch = 0;
        params.width = config.width;
    } else {
        // For pitch-linear render targets, the register width field holds the
        // pitch in bytes; derive the width in pixels from it.
        const u32 bpp = GetFormatBpp(params.pixel_format) / CHAR_BIT;
        params.pitch = config.width;
        params.width = params.pitch / bpp;
    }
    params.height = config.height;
    params.depth = 1;
    params.target = SurfaceTarget::Texture2D;
    params.num_levels = 1;
    params.emulated_levels = 1;
    params.is_layered = false;
    return params;
}
+
+SurfaceParams SurfaceParams::CreateForFermiCopySurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+ SurfaceParams params{};
+ params.is_tiled = !config.linear;
+ params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
+ config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
+ params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 5U) : 0,
+ params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 5U) : 0,
+ params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 5U) : 0,
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
+ params.component_type = ComponentTypeFromRenderTarget(config.format);
+ params.type = GetFormatType(params.pixel_format);
+ params.width = config.width;
+ params.height = config.height;
+ params.pitch = config.pitch;
+ // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters
+ params.target = SurfaceTarget::Texture2D;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = params.IsLayered();
+ return params;
+}
+
+bool SurfaceParams::IsLayered() const {
+ switch (target) {
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::TextureCubeArray:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Auto block resizing algorithm from:
+// https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
u32 SurfaceParams::GetMipBlockHeight(u32 level) const {
    // Level 0 keeps the surface's own block height.
    if (level == 0) {
        return this->block_height;
    }

    // Shrink the block height to fit the mip: take log2 of the number of
    // block rows, clamp to [3, 7] and rebase so the result lies in [0, 4].
    const u32 height_new{GetMipHeight(level)};
    const u32 default_block_height{GetDefaultBlockHeight()};
    const u32 blocks_in_y{(height_new + default_block_height - 1) / default_block_height};
    const u32 block_height_new = Common::Log2Ceil32(blocks_in_y);
    return std::clamp(block_height_new, 3U, 7U) - 3U;
}
+
u32 SurfaceParams::GetMipBlockDepth(u32 level) const {
    // Level 0 keeps the surface's own block depth.
    if (level == 0) {
        return this->block_depth;
    }
    // Layered surfaces have no blocking in the depth dimension.
    if (is_layered) {
        return 0;
    }

    // Otherwise derive the block depth from the mip's depth, capping the
    // result — reduced by one when this level also uses a tall block height.
    const u32 depth_new{GetMipDepth(level)};
    const u32 block_depth_new = Common::Log2Ceil32(depth_new);
    if (block_depth_new > 4) {
        return 5 - (GetMipBlockHeight(level) >= 2);
    }
    return block_depth_new;
}
+
+std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, false, false);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetConvertedMipmapSize(i);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
+ constexpr std::size_t rgba8_bpp = 4ULL;
+ const std::size_t width_t = GetMipWidth(level);
+ const std::size_t height_t = GetMipHeight(level);
+ const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
+ return width_t * height_t * depth_t * rgba8_bpp;
+}
+
// Sums all mipmap sizes of a single layer; tiled layered surfaces are
// additionally aligned to the block (GOB) granularity.
std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
    std::size_t size = 0;
    for (u32 level = 0; level < num_levels; ++level) {
        size += GetInnerMipmapMemorySize(level, as_host_size, uncompressed);
    }
    if (is_tiled && is_layered) {
        return Common::AlignBits(size,
                                 Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
    }
    return size;
}
+
// Size of one mipmap level for a single layer. Host sizes are always computed
// as linear; guest sizes honor the surface's tiling.
std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size,
                                                    bool uncompressed) const {
    const bool tiled{as_host_size ? false : is_tiled};
    const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), GetDefaultBlockWidth())};
    const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), GetDefaultBlockHeight())};
    // Layered surfaces count one layer here; callers multiply by GetNumLayers.
    const u32 depth{is_layered ? 1U : GetMipDepth(level)};
    return Tegra::Texture::CalculateSize(tiled, GetBytesPerPixel(), width, height, depth,
                                         GetMipBlockHeight(level), GetMipBlockDepth(level));
}
+
// Field-wise equality. srgb_conversion, emulated_levels and is_layered are
// omitted — presumably implied by pixel_format/num_levels/target.
bool SurfaceParams::operator==(const SurfaceParams& rhs) const {
    return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width,
                    height, depth, pitch, num_levels, pixel_format, component_type, type, target) ==
           std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth,
                    rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch,
                    rhs.num_levels, rhs.pixel_format, rhs.component_type, rhs.type, rhs.target);
}
+
// Short tag identifying the surface target, used when naming surfaces for
// debugging.
std::string SurfaceParams::TargetName() const {
    switch (target) {
    case SurfaceTarget::Texture1D:
        return "1D";
    case SurfaceTarget::TextureBuffer:
        return "TexBuffer";
    case SurfaceTarget::Texture2D:
        return "2D";
    case SurfaceTarget::Texture3D:
        return "3D";
    case SurfaceTarget::Texture1DArray:
        return "1DArray";
    case SurfaceTarget::Texture2DArray:
        return "2DArray";
    case SurfaceTarget::TextureCubemap:
        return "Cube";
    case SurfaceTarget::TextureCubeArray:
        return "CubeArray";
    default:
        LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
        UNREACHABLE();
        // Fallback tag for builds where UNREACHABLE does not abort.
        return fmt::format("TUK({})", static_cast<u32>(target));
    }
}
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
new file mode 100644
index 000000000..358d6757c
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.h
@@ -0,0 +1,286 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <map>
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "common/cityhash.h"
+#include "common/common_types.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/shader/shader_ir.h"
+#include "video_core/surface.h"
+#include "video_core/textures/decoders.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::SurfaceCompression;
+
+/// Describes the geometry, format and tiling of a guest GPU surface. Instances
+/// are built by the CreateFor* factories and used as cache keys by the texture
+/// cache, so all members participate (directly or indirectly) in Hash().
+class SurfaceParams {
+public:
+    /// Creates SurfaceCachedParams from a texture configuration.
+    static SurfaceParams CreateForTexture(Core::System& system,
+                                          const Tegra::Texture::FullTextureInfo& config,
+                                          const VideoCommon::Shader::Sampler& entry);
+
+    /// Creates SurfaceCachedParams for a depth buffer configuration.
+    static SurfaceParams CreateForDepthBuffer(
+        Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
+        u32 block_width, u32 block_height, u32 block_depth,
+        Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
+
+    /// Creates SurfaceCachedParams from a framebuffer configuration.
+    static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
+
+    /// Creates SurfaceCachedParams from a Fermi2D surface configuration.
+    static SurfaceParams CreateForFermiCopySurface(
+        const Tegra::Engines::Fermi2D::Regs::Surface& config);
+
+    /// Hashes the raw byte representation of this object.
+    /// NOTE(review): this hashes compiler-inserted padding bytes as well, so
+    /// the factory functions must zero-initialize the whole object for the
+    /// hash to be deterministic -- confirm they do.
+    std::size_t Hash() const {
+        return static_cast<std::size_t>(
+            Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
+    }
+
+    bool operator==(const SurfaceParams& rhs) const;
+
+    bool operator!=(const SurfaceParams& rhs) const {
+        return !operator==(rhs);
+    }
+
+    /// Size of the surface as laid out in guest memory (tiled, compressed).
+    std::size_t GetGuestSizeInBytes() const {
+        return GetInnerMemorySize(false, false, false);
+    }
+
+    /// Size of the surface as uploaded to the host GPU (linear).
+    std::size_t GetHostSizeInBytes() const {
+        std::size_t host_size_in_bytes;
+        if (GetCompressionType() == SurfaceCompression::Converted) {
+            // NOTE(review): rgb8_bpp is unused here; GetConvertedMipmapSize
+            // presumably accounts for the 4 bytes per pixel itself -- confirm,
+            // then drop the constant.
+            constexpr std::size_t rgb8_bpp = 4ULL;
+            // ASTC is decompressed in software; it is emulated as RGBA8
+            host_size_in_bytes = 0;
+            for (u32 level = 0; level < num_levels; ++level) {
+                host_size_in_bytes += GetConvertedMipmapSize(level);
+            }
+        } else {
+            host_size_in_bytes = GetInnerMemorySize(true, false, false);
+        }
+        return host_size_in_bytes;
+    }
+
+    /// Returns the width aligned so a row occupies a multiple of 64 bytes.
+    u32 GetBlockAlignedWidth() const {
+        return Common::AlignUp(width, 64 / GetBytesPerPixel());
+    }
+
+    /// Returns the width of a given mipmap level.
+    u32 GetMipWidth(u32 level) const {
+        return std::max(1U, width >> level);
+    }
+
+    /// Returns the height of a given mipmap level.
+    u32 GetMipHeight(u32 level) const {
+        return std::max(1U, height >> level);
+    }
+
+    /// Returns the depth of a given mipmap level. For layered surfaces the
+    /// layer count does not shrink with the mip chain.
+    u32 GetMipDepth(u32 level) const {
+        return is_layered ? depth : std::max(1U, depth >> level);
+    }
+
+    /// Returns the block height of a given mipmap level.
+    u32 GetMipBlockHeight(u32 level) const;
+
+    /// Returns the block depth of a given mipmap level.
+    u32 GetMipBlockDepth(u32 level) const;
+
+    /// Returns the best possible row/pitch alignment for the surface
+    /// (the largest power of two dividing the row size in bytes).
+    u32 GetRowAlignment(u32 level) const {
+        const u32 bpp =
+            GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
+        return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
+    }
+
+    /// Returns the offset in bytes in guest memory of a given mipmap level.
+    std::size_t GetGuestMipmapLevelOffset(u32 level) const;
+
+    /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
+    std::size_t GetHostMipmapLevelOffset(u32 level) const;
+
+    /// Returns the offset in bytes in host memory (linear) of a given mipmap level
+    /// for a texture that is converted in host gpu.
+    std::size_t GetConvertedMipmapOffset(u32 level) const;
+
+    /// Returns the size in bytes in guest memory of a given mipmap level.
+    std::size_t GetGuestMipmapSize(u32 level) const {
+        return GetInnerMipmapMemorySize(level, false, false);
+    }
+
+    /// Returns the size in bytes in host memory (linear) of a given mipmap level,
+    /// summed over all layers.
+    std::size_t GetHostMipmapSize(u32 level) const {
+        return GetInnerMipmapMemorySize(level, true, false) * GetNumLayers();
+    }
+
+    std::size_t GetConvertedMipmapSize(u32 level) const;
+
+    /// Returns the size of a layer in bytes in guest memory.
+    std::size_t GetGuestLayerSize() const {
+        return GetLayerSize(false, false);
+    }
+
+    /// Returns the size of a layer in bytes in host memory for a given mipmap level.
+    std::size_t GetHostLayerSize(u32 level) const {
+        ASSERT(target != VideoCore::Surface::SurfaceTarget::Texture3D);
+        return GetInnerMipmapMemorySize(level, true, false);
+    }
+
+    /// Returns the max possible mipmap that the texture can have in host gpu
+    u32 MaxPossibleMipmap() const {
+        const u32 max_mipmap_w = Common::Log2Ceil32(width) + 1U;
+        const u32 max_mipmap_h = Common::Log2Ceil32(height) + 1U;
+        const u32 max_mipmap = std::max(max_mipmap_w, max_mipmap_h);
+        if (target != VideoCore::Surface::SurfaceTarget::Texture3D)
+            return max_mipmap;
+        // 3D textures also mip down along depth.
+        return std::max(max_mipmap, Common::Log2Ceil32(depth) + 1U);
+    }
+
+    /// Returns if the guest surface is a compressed surface.
+    bool IsCompressed() const {
+        return GetDefaultBlockHeight() > 1 || GetDefaultBlockWidth() > 1;
+    }
+
+    /// Returns the default block width.
+    u32 GetDefaultBlockWidth() const {
+        return VideoCore::Surface::GetDefaultBlockWidth(pixel_format);
+    }
+
+    /// Returns the default block height.
+    u32 GetDefaultBlockHeight() const {
+        return VideoCore::Surface::GetDefaultBlockHeight(pixel_format);
+    }
+
+    /// Returns the bits per pixel.
+    u32 GetBitsPerPixel() const {
+        return VideoCore::Surface::GetFormatBpp(pixel_format);
+    }
+
+    /// Returns the bytes per pixel.
+    u32 GetBytesPerPixel() const {
+        return VideoCore::Surface::GetBytesPerPixel(pixel_format);
+    }
+
+    /// Returns true if the pixel format is a depth and/or stencil format.
+    bool IsPixelFormatZeta() const {
+        return pixel_format >= VideoCore::Surface::PixelFormat::MaxColorFormat &&
+               pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
+    }
+
+    /// Returns how the compression should be handled for this texture.
+    SurfaceCompression GetCompressionType() const {
+        return VideoCore::Surface::GetFormatCompressionType(pixel_format);
+    }
+
+    /// Returns whether the surface is a TextureBuffer type of surface.
+    bool IsBuffer() const {
+        return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
+    }
+
+    /// Returns the debug name of the texture for use in graphic debuggers.
+    std::string TargetName() const;
+
+    // Helper used for out of class size calculations: aligns a size to a whole
+    // GOB-block boundary for layered/tiled layouts.
+    static std::size_t AlignLayered(const std::size_t out_size, const u32 block_height,
+                                    const u32 block_depth) {
+        return Common::AlignBits(out_size,
+                                 Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
+    }
+
+    /// Converts a width from a type of surface into another. This helps represent the
+    /// equivalent value between compressed/non-compressed textures.
+    static u32 ConvertWidth(u32 width, VideoCore::Surface::PixelFormat pixel_format_from,
+                            VideoCore::Surface::PixelFormat pixel_format_to) {
+        const u32 bw1 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_from);
+        const u32 bw2 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_to);
+        // Ceiling division: width * bw2 / bw1, rounded up.
+        return (width * bw2 + bw1 - 1) / bw1;
+    }
+
+    /// Converts a height from a type of surface into another. This helps represent the
+    /// equivalent value between compressed/non-compressed textures.
+    static u32 ConvertHeight(u32 height, VideoCore::Surface::PixelFormat pixel_format_from,
+                            VideoCore::Surface::PixelFormat pixel_format_to) {
+        const u32 bh1 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_from);
+        const u32 bh2 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_to);
+        return (height * bh2 + bh1 - 1) / bh1;
+    }
+
+    // Finds the maximum possible width between 2 2D layers of different formats
+    static u32 IntersectWidth(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+                              const u32 src_level, const u32 dst_level) {
+        const u32 bw1 = src_params.GetDefaultBlockWidth();
+        const u32 bw2 = dst_params.GetDefaultBlockWidth();
+        const u32 t_src_width = (src_params.GetMipWidth(src_level) * bw2 + bw1 - 1) / bw1;
+        const u32 t_dst_width = (dst_params.GetMipWidth(dst_level) * bw1 + bw2 - 1) / bw2;
+        return std::min(t_src_width, t_dst_width);
+    }
+
+    // Finds the maximum possible height between 2 2D layers of different formats
+    static u32 IntersectHeight(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+                               const u32 src_level, const u32 dst_level) {
+        const u32 bh1 = src_params.GetDefaultBlockHeight();
+        const u32 bh2 = dst_params.GetDefaultBlockHeight();
+        const u32 t_src_height = (src_params.GetMipHeight(src_level) * bh2 + bh1 - 1) / bh1;
+        const u32 t_dst_height = (dst_params.GetMipHeight(dst_level) * bh1 + bh2 - 1) / bh2;
+        return std::min(t_src_height, t_dst_height);
+    }
+
+    bool is_tiled;
+    bool srgb_conversion;
+    bool is_layered;
+    u32 block_width;
+    u32 block_height;
+    u32 block_depth;
+    u32 tile_width_spacing;
+    u32 width;
+    u32 height;
+    u32 depth;
+    u32 pitch;
+    u32 num_levels;
+    u32 emulated_levels;
+    VideoCore::Surface::PixelFormat pixel_format;
+    VideoCore::Surface::ComponentType component_type;
+    VideoCore::Surface::SurfaceType type;
+    VideoCore::Surface::SurfaceTarget target;
+
+private:
+    /// Returns the size of a given mipmap level inside a layer.
+    std::size_t GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool uncompressed) const;
+
+    /// Returns the size of all mipmap levels and aligns as needed.
+    std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const {
+        return GetLayerSize(as_host_size, uncompressed) * (layer_only ? 1U : depth);
+    }
+
+    /// Returns the size of a layer
+    std::size_t GetLayerSize(bool as_host_size, bool uncompressed) const;
+
+    /// Number of array layers; for non-layered surfaces this is 1.
+    std::size_t GetNumLayers() const {
+        return is_layered ? depth : 1;
+    }
+
+    /// Returns true if these parameters are from a layered surface.
+    bool IsLayered() const;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+// Enables SurfaceParams as a key in unordered containers by delegating to
+// SurfaceParams::Hash().
+template <>
+struct hash<VideoCommon::SurfaceParams> {
+    std::size_t operator()(const VideoCommon::SurfaceParams& k) const noexcept {
+        return k.Hash();
+    }
+};
+
+} // namespace std
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
new file mode 100644
index 000000000..467696a4c
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -0,0 +1,23 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include "common/common_types.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace VideoCommon {
+
+// Packs the view fields into one hash value via shifts and xor.
+// NOTE(review): `num_layers << 16` shifts the u32 *before* widening, so high
+// layer-count bits are dropped; the `<< 32` / `<< 36` shifts are undefined
+// behaviour on platforms where std::size_t is 32 bits. Consider casting first
+// and/or using a proper hash combiner.
+std::size_t ViewParams::Hash() const {
+    return static_cast<std::size_t>(base_layer) ^ static_cast<std::size_t>(num_layers << 16) ^
+           (static_cast<std::size_t>(base_level) << 24) ^
+           (static_cast<std::size_t>(num_levels) << 32) ^ (static_cast<std::size_t>(target) << 36);
+}
+
+// Memberwise equality over all five view fields.
+bool ViewParams::operator==(const ViewParams& rhs) const {
+    return std::tie(base_layer, num_layers, base_level, num_levels, target) ==
+           std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
new file mode 100644
index 000000000..04ca5639b
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.h
@@ -0,0 +1,67 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <functional>
+
+#include "common/common_types.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+/// Identifies a sub-range (layers + mip levels) of a surface together with the
+/// target it is viewed as. Used as the key for cached views.
+struct ViewParams {
+    ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer, u32 num_layers,
+               u32 base_level, u32 num_levels)
+        : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level},
+          num_levels{num_levels} {}
+
+    std::size_t Hash() const;
+
+    bool operator==(const ViewParams& rhs) const;
+
+    VideoCore::Surface::SurfaceTarget target{};
+    u32 base_layer{};
+    u32 num_layers{};
+    u32 base_level{};
+    u32 num_levels{};
+
+    /// Returns true when the view target addresses multiple array layers.
+    bool IsLayered() const {
+        switch (target) {
+        case VideoCore::Surface::SurfaceTarget::Texture1DArray:
+        case VideoCore::Surface::SurfaceTarget::Texture2DArray:
+        case VideoCore::Surface::SurfaceTarget::TextureCubemap:
+        case VideoCore::Surface::SurfaceTarget::TextureCubeArray:
+            return true;
+        default:
+            return false;
+        }
+    }
+};
+
+/// Base class for backend view objects; stores the describing ViewParams.
+class ViewBase {
+public:
+    // NOTE(review): single-argument constructor is not `explicit`, allowing an
+    // implicit ViewParams -> ViewBase conversion -- confirm that is intended.
+    ViewBase(const ViewParams& params) : params{params} {}
+
+    const ViewParams& GetViewParams() const {
+        return params;
+    }
+
+protected:
+    ViewParams params;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+// Enables ViewParams as a key in unordered containers by delegating to
+// ViewParams::Hash().
+template <>
+struct hash<VideoCommon::ViewParams> {
+    std::size_t operator()(const VideoCommon::ViewParams& k) const noexcept {
+        return k.Hash();
+    }
+};
+
+} // namespace std
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
new file mode 100644
index 000000000..c9e72531a
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -0,0 +1,814 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <tuple>
+#include <unordered_map>
+#include <vector>
+
+#include <boost/icl/interval_map.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/math_util.h"
+#include "core/core.h"
+#include "core/memory.h"
+#include "core/settings.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra::Texture {
+struct FullTextureInfo;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace VideoCommon {
+
+using VideoCore::Surface::PixelFormat;
+
+using VideoCore::Surface::SurfaceTarget;
+using RenderTargetConfig = Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig;
+
+template <typename TSurface, typename TView>
+class TextureCache {
+ using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface>>;
+ using IntervalType = typename IntervalMap::interval_type;
+
+public:
+    /// Unregisters every surface overlapping [addr, addr + size). Contents are
+    /// not flushed back to guest memory first.
+    void InvalidateRegion(CacheAddr addr, std::size_t size) {
+        std::lock_guard lock{mutex};
+
+        for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+            Unregister(surface);
+        }
+    }
+
+    /***
+     * `Guard` guarantees that rendertargets don't unregister themselves if
+     * they collide. Protection is currently only done on 3D slices.
+     ***/
+    void GuardRenderTargets(bool new_guard) {
+        guard_render_targets = new_guard;
+    }
+
+    /// While enabled, surfaces fetched via GetTextureSurface are recorded in
+    /// sampled_textures (consumed by TextureBarrier).
+    void GuardSamplers(bool new_guard) {
+        guard_samplers = new_guard;
+    }
+
+    /// Writes back every surface overlapping [addr, addr + size) to guest
+    /// memory, oldest modification first so newer data wins on overlap.
+    void FlushRegion(CacheAddr addr, std::size_t size) {
+        std::lock_guard lock{mutex};
+
+        auto surfaces = GetSurfacesInRegion(addr, size);
+        if (surfaces.empty()) {
+            return;
+        }
+        std::sort(surfaces.begin(), surfaces.end(), [](const TSurface& a, const TSurface& b) {
+            return a->GetModificationTick() < b->GetModificationTick();
+        });
+        for (const auto& surface : surfaces) {
+            FlushSurface(surface);
+        }
+    }
+
+    /// Resolves the surface/view for a sampled texture. Returns a
+    /// default-constructed view when the TIC has a null address.
+    TView GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
+                            const VideoCommon::Shader::Sampler& entry) {
+        std::lock_guard lock{mutex};
+        const auto gpu_addr{config.tic.Address()};
+        if (!gpu_addr) {
+            return {};
+        }
+        const auto params{SurfaceParams::CreateForTexture(system, config, entry)};
+        const auto [surface, view] = GetSurface(gpu_addr, params, true, false);
+        if (guard_samplers) {
+            // Remember the surface so TextureBarrier can detect feedback loops.
+            sampled_textures.push_back(surface);
+        }
+        return view;
+    }
+
+    /// Returns true if any texture sampled since the last call is also bound
+    /// as a render target (i.e. a texture barrier is needed), then clears the
+    /// recorded set.
+    /// NOTE(review): unlike the other public entry points this does not take
+    /// `mutex` -- confirm it is only called from the same thread that samples.
+    bool TextureBarrier() {
+        const bool any_rt =
+            std::any_of(sampled_textures.begin(), sampled_textures.end(),
+                        [](const auto& surface) { return surface->IsRenderTarget(); });
+        sampled_textures.clear();
+        return any_rt;
+    }
+
+    /// Returns the view for the currently configured zeta (depth) buffer,
+    /// re-resolving it only when the Maxwell3D dirty flag is set. Transfers
+    /// the render-target protection mark from the old target to the new one.
+    TView GetDepthBufferSurface(bool preserve_contents) {
+        std::lock_guard lock{mutex};
+        auto& maxwell3d = system.GPU().Maxwell3D();
+
+        if (!maxwell3d.dirty_flags.zeta_buffer) {
+            // Cached view is still valid.
+            return depth_buffer.view;
+        }
+        maxwell3d.dirty_flags.zeta_buffer = false;
+
+        const auto& regs{maxwell3d.regs};
+        const auto gpu_addr{regs.zeta.Address()};
+        if (!gpu_addr || !regs.zeta_enable) {
+            SetEmptyDepthBuffer();
+            return {};
+        }
+        const auto depth_params{SurfaceParams::CreateForDepthBuffer(
+            system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
+            regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
+            regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
+        auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true);
+        if (depth_buffer.target)
+            depth_buffer.target->MarkAsRenderTarget(false);
+        depth_buffer.target = surface_view.first;
+        depth_buffer.view = surface_view.second;
+        if (depth_buffer.target)
+            depth_buffer.target->MarkAsRenderTarget(true);
+        return surface_view.second;
+    }
+
+    /// Returns the view for color render target `index`, re-resolving it only
+    /// when its Maxwell3D dirty flag is set. Disabled or null targets clear
+    /// the slot and return an empty view.
+    TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
+        std::lock_guard lock{mutex};
+        ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
+        auto& maxwell3d = system.GPU().Maxwell3D();
+        if (!maxwell3d.dirty_flags.color_buffer[index]) {
+            return render_targets[index].view;
+        }
+        maxwell3d.dirty_flags.color_buffer.reset(index);
+
+        const auto& regs{maxwell3d.regs};
+        if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 ||
+            regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
+            SetEmptyColorBuffer(index);
+            return {};
+        }
+
+        const auto& config{regs.rt[index]};
+        const auto gpu_addr{config.Address()};
+        if (!gpu_addr) {
+            SetEmptyColorBuffer(index);
+            return {};
+        }
+
+        auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+                                       preserve_contents, true);
+        // Move the render-target protection mark to the new target.
+        if (render_targets[index].target)
+            render_targets[index].target->MarkAsRenderTarget(false);
+        render_targets[index].target = surface_view.first;
+        render_targets[index].view = surface_view.second;
+        if (render_targets[index].target)
+            render_targets[index].target->MarkAsRenderTarget(true);
+        return surface_view.second;
+    }
+
+    /// Bumps the modification tick of color target `index`, if one is bound.
+    /// NOTE(review): no lock and no index assert here, unlike
+    /// GetColorBufferSurface -- confirm callers guarantee both.
+    void MarkColorBufferInUse(std::size_t index) {
+        if (auto& render_target = render_targets[index].target) {
+            render_target->MarkAsModified(true, Tick());
+        }
+    }
+
+    /// Bumps the modification tick of the depth target, if one is bound.
+    void MarkDepthBufferInUse() {
+        if (depth_buffer.target) {
+            depth_buffer.target->MarkAsModified(true, Tick());
+        }
+    }
+
+    /// Clears the bound depth target, dropping its render-target protection.
+    void SetEmptyDepthBuffer() {
+        if (depth_buffer.target == nullptr) {
+            return;
+        }
+        depth_buffer.target->MarkAsRenderTarget(false);
+        depth_buffer.target = nullptr;
+        depth_buffer.view = nullptr;
+    }
+
+    /// Clears color target `index`, dropping its render-target protection.
+    void SetEmptyColorBuffer(std::size_t index) {
+        if (render_targets[index].target == nullptr) {
+            return;
+        }
+        render_targets[index].target->MarkAsRenderTarget(false);
+        render_targets[index].target = nullptr;
+        render_targets[index].view = nullptr;
+    }
+
+    /// Performs a Fermi2D blit between two cached surfaces and marks the
+    /// destination as modified. Both surfaces are resolved with
+    /// preserve_contents = true so partially-covered destinations keep data.
+    void DoFermiCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
+                     const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
+                     const Tegra::Engines::Fermi2D::Config& copy_config) {
+        std::lock_guard lock{mutex};
+        std::pair<TSurface, TView> dst_surface = GetFermiSurface(dst_config);
+        std::pair<TSurface, TView> src_surface = GetFermiSurface(src_config);
+        ImageBlit(src_surface.second, dst_surface.second, copy_config);
+        dst_surface.first->MarkAsModified(true, Tick());
+    }
+
+    /// Looks for a surface whose base address exactly matches host_ptr.
+    /// Only scans the single registry page the address falls into; returns
+    /// nullptr when no exact match exists.
+    TSurface TryFindFramebufferSurface(const u8* host_ptr) {
+        const CacheAddr cache_addr = ToCacheAddr(host_ptr);
+        if (!cache_addr) {
+            return nullptr;
+        }
+        const CacheAddr page = cache_addr >> registry_page_bits;
+        std::vector<TSurface>& list = registry[page];
+        for (auto& surface : list) {
+            if (surface->GetCacheAddr() == cache_addr) {
+                return surface;
+            }
+        }
+        return nullptr;
+    }
+
+    /// Advances and returns the monotonic modification counter.
+    u64 Tick() {
+        return ++ticks;
+    }
+
+protected:
+    /// Protected: only concrete backend caches may construct this base.
+    /// Sets up empty render targets, the staging cache, and the table of
+    /// "sibling" formats (depth formats paired with their same-size color
+    /// counterparts, used to reinterpret surfaces without a buffer copy).
+    TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
+        : system{system}, rasterizer{rasterizer} {
+        for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+            SetEmptyColorBuffer(i);
+        }
+
+        SetEmptyDepthBuffer();
+        staging_cache.SetSize(2);
+
+        // Sibling pairs are symmetric: looking up either format yields the other.
+        const auto make_siblings = [this](PixelFormat a, PixelFormat b) {
+            siblings_table[static_cast<std::size_t>(a)] = b;
+            siblings_table[static_cast<std::size_t>(b)] = a;
+        };
+        std::fill(siblings_table.begin(), siblings_table.end(), PixelFormat::Invalid);
+        make_siblings(PixelFormat::Z16, PixelFormat::R16U);
+        make_siblings(PixelFormat::Z32F, PixelFormat::R32F);
+        make_siblings(PixelFormat::Z32FS8, PixelFormat::RG32F);
+
+        sampled_textures.reserve(64);
+    }
+
+    ~TextureCache() = default;
+
+    /// Backend hook: allocates a host surface for the given parameters.
+    virtual TSurface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) = 0;
+
+    /// Backend hook: copies a sub-region between two host surfaces.
+    virtual void ImageCopy(TSurface& src_surface, TSurface& dst_surface,
+                           const CopyParams& copy_params) = 0;
+
+    /// Backend hook: performs a scaled/format-converting blit between views.
+    virtual void ImageBlit(TView& src_view, TView& dst_view,
+                           const Tegra::Engines::Fermi2D::Config& copy_config) = 0;
+
+    // Depending on the backend, a buffer copy can be slow as it means deoptimizing the texture
+    // and reading it from a separate buffer.
+    virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0;
+
+    /// Registers a surface in the cache, resolving and storing its cache/CPU
+    /// addresses and updating the rasterizer's cached-page counters.
+    /// NOTE(review): on an unmapped address this logs and returns, leaving the
+    /// surface silently untracked -- callers get a surface that will never be
+    /// invalidated; confirm this is the intended failure mode.
+    void Register(TSurface surface) {
+        const GPUVAddr gpu_addr = surface->GetGpuAddr();
+        const CacheAddr cache_ptr = ToCacheAddr(system.GPU().MemoryManager().GetPointer(gpu_addr));
+        const std::size_t size = surface->GetSizeInBytes();
+        const std::optional<VAddr> cpu_addr =
+            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+        if (!cache_ptr || !cpu_addr) {
+            LOG_CRITICAL(HW_GPU, "Failed to register surface with unmapped gpu_address 0x{:016x}",
+                         gpu_addr);
+            return;
+        }
+        const bool continuous = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
+        surface->MarkAsContinuous(continuous);
+        surface->SetCacheAddr(cache_ptr);
+        surface->SetCpuAddr(*cpu_addr);
+        RegisterInnerCache(surface);
+        surface->MarkAsRegistered(true);
+        rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+    }
+
+    /// Removes a surface from the cache and parks it in the reserve pool so a
+    /// later allocation with identical params can reuse it. Skipped while the
+    /// render-target guard protects the surface.
+    /// NOTE(review): gpu_addr and cache_ptr are computed but never used here.
+    void Unregister(TSurface surface) {
+        if (guard_render_targets && surface->IsProtected()) {
+            return;
+        }
+        const GPUVAddr gpu_addr = surface->GetGpuAddr();
+        const CacheAddr cache_ptr = surface->GetCacheAddr();
+        const std::size_t size = surface->GetSizeInBytes();
+        const VAddr cpu_addr = surface->GetCpuAddr();
+        rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+        UnregisterInnerCache(surface);
+        surface->MarkAsRegistered(false);
+        ReserveSurface(surface->GetSurfaceParams(), surface);
+    }
+
+    /// Returns a host surface for the given params, reusing one from the
+    /// reserve pool when available (surfaces enter the pool via Unregister's
+    /// ReserveSurface call).
+    TSurface GetUncachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) {
+        if (const auto surface = TryGetReservedSurface(params); surface) {
+            surface->SetGpuAddr(gpu_addr);
+            return surface;
+        }
+        // No reserved surface available, create a new one
+        auto new_surface{CreateSurface(gpu_addr, params)};
+        return new_surface;
+    }
+
+    /// Resolves the surface/view described by a Fermi2D copy register block.
+    std::pair<TSurface, TView> GetFermiSurface(
+        const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+        SurfaceParams params = SurfaceParams::CreateForFermiCopySurface(config);
+        const GPUVAddr gpu_addr = config.Address();
+        return GetSurface(gpu_addr, params, true, false);
+    }
+
+ Core::System& system;
+
+private:
+    /// How to handle overlapping surfaces that cannot be matched to a new one.
+    // NOTE(review): value 2 is intentionally skipped? BufferCopy is declared
+    // but never returned by PickStrategy below -- confirm.
+    enum class RecycleStrategy : u32 {
+        Ignore = 0,
+        Flush = 1,
+        BufferCopy = 3,
+    };
+
+    /**
+     * `PickStrategy` takes care of selecting a proper strategy to deal with a texture recycle.
+     * @param overlaps, the overlapping surfaces registered in the cache.
+     * @param params, the parameters on the new surface.
+     * @param gpu_addr, the starting address of the new surface.
+     * @param untopological, tells the recycler that the texture has no way to match the overlaps
+     * due to topological reasons.
+     **/
+    RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
+                                 const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
+        if (Settings::values.use_accurate_gpu_emulation) {
+            return RecycleStrategy::Flush;
+        }
+        // 3D Textures decision
+        if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) {
+            return RecycleStrategy::Flush;
+        }
+        // NOTE(review): `auto s` copies each TSurface handle; `const auto&`
+        // would avoid the refcount churn.
+        for (auto s : overlaps) {
+            const auto& s_params = s->GetSurfaceParams();
+            if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) {
+                return RecycleStrategy::Flush;
+            }
+        }
+        // Untopological decision
+        if (untopological == MatchTopologyResult::CompressUnmatch) {
+            return RecycleStrategy::Flush;
+        }
+        if (untopological == MatchTopologyResult::FullMatch && !params.is_tiled) {
+            return RecycleStrategy::Flush;
+        }
+        return RecycleStrategy::Ignore;
+    }
+
+    /**
+     * `RecycleSurface` is a method we use to decide what to do with textures we can't resolve in
+     * the cache. It has 2 implemented strategies: Ignore and Flush. Ignore just unregisters all
+     * the overlaps and loads the new texture. Flush, flushes all the overlaps into memory and
+     * loads the new surface from that data.
+     * @param overlaps, the overlapping surfaces registered in the cache.
+     * @param params, the parameters on the new surface.
+     * @param gpu_addr, the starting address of the new surface.
+     * @param preserve_contents, tells if the new surface should be loaded from memory or left
+     * blank.
+     * @param untopological, tells the recycler that the texture has no way to match the overlaps
+     * due to topological reasons.
+     **/
+    std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
+                                              const SurfaceParams& params, const GPUVAddr gpu_addr,
+                                              const bool preserve_contents,
+                                              const MatchTopologyResult untopological) {
+        const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation;
+        // NOTE(review): overlaps are unregistered *before* the strategy is
+        // picked, so the Flush path below flushes already-unregistered
+        // surfaces -- confirm FlushSurface is valid on unregistered surfaces.
+        for (auto& surface : overlaps) {
+            Unregister(surface);
+        }
+        switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
+        case RecycleStrategy::Ignore: {
+            return InitializeSurface(gpu_addr, params, do_load);
+        }
+        case RecycleStrategy::Flush: {
+            // Flush oldest-first so newer data overwrites older on overlap.
+            std::sort(overlaps.begin(), overlaps.end(),
+                      [](const TSurface& a, const TSurface& b) -> bool {
+                          return a->GetModificationTick() < b->GetModificationTick();
+                      });
+            for (auto& surface : overlaps) {
+                FlushSurface(surface);
+            }
+            return InitializeSurface(gpu_addr, params, preserve_contents);
+        }
+        case RecycleStrategy::BufferCopy: {
+            auto new_surface = GetUncachedSurface(gpu_addr, params);
+            BufferCopy(overlaps[0], new_surface);
+            return {new_surface, new_surface->GetMainView()};
+        }
+        default: {
+            UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
+            return InitializeSurface(gpu_addr, params, do_load);
+        }
+        }
+    }
+
+    /**
+     * `RebuildSurface` this method takes a single surface and recreates into another that
+     * may differ in format, target or width alignment.
+     * @param current_surface, the registered surface in the cache which we want to convert.
+     * @param params, the new surface params which we'll use to recreate the surface.
+     **/
+    std::pair<TSurface, TView> RebuildSurface(TSurface current_surface, const SurfaceParams& params,
+                                              bool is_render) {
+        const auto gpu_addr = current_surface->GetGpuAddr();
+        const auto& cr_params = current_surface->GetSurfaceParams();
+        TSurface new_surface;
+        // For non-render surfaces whose formats are "siblings" (same byte
+        // size), keep the old format to avoid a conversion.
+        if (cr_params.pixel_format != params.pixel_format && !is_render &&
+            GetSiblingFormat(cr_params.pixel_format) == params.pixel_format) {
+            SurfaceParams new_params = params;
+            new_params.pixel_format = cr_params.pixel_format;
+            new_params.component_type = cr_params.component_type;
+            new_params.type = cr_params.type;
+            new_surface = GetUncachedSurface(gpu_addr, new_params);
+        } else {
+            new_surface = GetUncachedSurface(gpu_addr, params);
+        }
+        const auto& final_params = new_surface->GetSurfaceParams();
+        if (cr_params.type != final_params.type ||
+            (cr_params.component_type != final_params.component_type)) {
+            // Incompatible types: go through a (slow) buffer copy.
+            BufferCopy(current_surface, new_surface);
+        } else {
+            // Same type: image-copy each sub-rectangle ("brick") across.
+            std::vector<CopyParams> bricks = current_surface->BreakDown(final_params);
+            for (auto& brick : bricks) {
+                ImageCopy(current_surface, new_surface, brick);
+            }
+        }
+        Unregister(current_surface);
+        Register(new_surface);
+        new_surface->MarkAsModified(current_surface->IsModified(), Tick());
+        return {new_surface, new_surface->GetMainView()};
+    }
+
+    /**
+     * `ManageStructuralMatch` this method takes a single surface and checks with the new surface's
+     * params if it's an exact match, we return the main view of the registered surface. If its
+     * formats don't match, we rebuild the surface. We call this last method a `Mirage`. If formats
+     * match but the targets don't, we create an overview View of the registered surface.
+     * @param current_surface, the registered surface in the cache which we want to convert.
+     * @param params, the new surface params which we want to check.
+     **/
+    std::pair<TSurface, TView> ManageStructuralMatch(TSurface current_surface,
+                                                     const SurfaceParams& params, bool is_render) {
+        const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+        const bool matches_target = current_surface->MatchTarget(params.target);
+        const auto match_check = [&]() -> std::pair<TSurface, TView> {
+            if (matches_target) {
+                return {current_surface, current_surface->GetMainView()};
+            }
+            return {current_surface, current_surface->EmplaceOverview(params)};
+        };
+        if (!is_mirage) {
+            return match_check();
+        }
+        // Sibling formats can alias each other without rebuilding, except for
+        // render targets which must match exactly.
+        if (!is_render && GetSiblingFormat(current_surface->GetFormat()) == params.pixel_format) {
+            return match_check();
+        }
+        return RebuildSurface(current_surface, params, is_render);
+    }
+
+    /**
+     * `TryReconstructSurface` unlike `RebuildSurface` where we know the registered surface
+     * matches the candidate in some way, we got no guarantees here. We try to see if the overlaps
+     * are sublayers/mipmaps of the new surface, if they all match we end up recreating a surface
+     * for them, else we return nothing.
+     * @param overlaps, the overlapping surfaces registered in the cache.
+     * @param params, the parameters on the new surface.
+     * @param gpu_addr, the starting address of the new surface.
+     **/
+    std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps,
+                                                                   const SurfaceParams& params,
+                                                                   const GPUVAddr gpu_addr) {
+        if (params.target == SurfaceTarget::Texture3D) {
+            return {};
+        }
+        bool modified = false;
+        TSurface new_surface = GetUncachedSurface(gpu_addr, params);
+        u32 passed_tests = 0;
+        for (auto& surface : overlaps) {
+            const SurfaceParams& src_params = surface->GetSurfaceParams();
+            if (src_params.is_layered || src_params.num_levels > 1) {
+                // We send this cases to recycle as they are more complex to handle
+                return {};
+            }
+            const std::size_t candidate_size = surface->GetSizeInBytes();
+            // Locate which (layer, mipmap) of the new surface this overlap
+            // corresponds to, by its guest address.
+            auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
+            if (!mipmap_layer) {
+                continue;
+            }
+            const auto [layer, mipmap] = *mipmap_layer;
+            if (new_surface->GetMipmapSize(mipmap) != candidate_size) {
+                continue;
+            }
+            modified |= surface->IsModified();
+            // Now we got all the data set up
+            const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap);
+            const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap);
+            // CopyParams order: src x/y/z, dst x/y/z(layer), src level, dst level, w, h, d.
+            const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, width, height, 1);
+            passed_tests++;
+            ImageCopy(surface, new_surface, copy_params);
+        }
+        if (passed_tests == 0) {
+            return {};
+            // In Accurate GPU all tests should pass, else we recycle
+        } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
+            return {};
+        }
+        for (auto surface : overlaps) {
+            Unregister(surface);
+        }
+        new_surface->MarkAsModified(modified, Tick());
+        Register(new_surface);
+        return {{new_surface, new_surface->GetMainView()}};
+    }
+
+ /**
+ * `GetSurface` gets the starting address and parameters of a candidate surface and tries
+ * to find a matching surface within the cache. This is done in 3 big steps. The first is to
+ * check the 1st Level Cache in order to find an exact match, if we fail, we move to step 2.
+ * Step 2 is checking if there are any overlaps at all, if none, we just load the texture from
+ * memory else we move to step 3. Step 3 consists of figuring the relationship between the
+ * candidate texture and the overlaps. We divide the scenarios depending if there's 1 or many
+ * overlaps. If there's many, we just try to reconstruct a new surface out of them based on the
+ * candidate's parameters, if we fail, we recycle. When there's only 1 overlap then we have to
+ * check if the candidate is a view (layer/mipmap) of the overlap or if the registered surface
+ * is a mipmap/layer of the candidate. In this last case we reconstruct a new surface.
+ * @param gpu_addr, the starting address of the candidate surface.
+ * @param params, the parameters of the candidate surface.
+ * @param preserve_contents, tells if the new surface should be loaded from memory or left
+ * blank.
+ **/
+ std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents, bool is_render) {
+ const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
+ const auto cache_addr{ToCacheAddr(host_ptr)};
+
+ // Step 0: guarantee a valid surface
+ if (!cache_addr) {
+ // Return a null (1x1x1, untiled) surface if the address is unmapped.
+ SurfaceParams new_params = params;
+ new_params.width = 1;
+ new_params.height = 1;
+ new_params.depth = 1;
+ new_params.block_height = 0;
+ new_params.block_depth = 0;
+ return InitializeSurface(gpu_addr, new_params, false);
+ }
+
+ // Step 1
+ // Check Level 1 Cache for a fast structural match. If candidate surface
+ // matches at certain level we are pretty much done.
+ if (const auto iter = l1_cache.find(cache_addr); iter != l1_cache.end()) {
+ TSurface& current_surface = iter->second;
+ const auto topological_result = current_surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ std::vector<TSurface> overlaps{current_surface};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ const auto struct_result = current_surface->MatchesStructure(params);
+ if (struct_result != MatchStructureResult::None &&
+ (params.target != SurfaceTarget::Texture3D ||
+ current_surface->MatchTarget(params.target))) {
+ if (struct_result == MatchStructureResult::FullMatch) {
+ return ManageStructuralMatch(current_surface, params, is_render);
+ } else {
+ return RebuildSurface(current_surface, params, is_render);
+ }
+ }
+ }
+
+ // Step 2
+ // Obtain all possible overlaps in the memory region
+ const std::size_t candidate_size = params.GetGuestSizeInBytes();
+ auto overlaps{GetSurfacesInRegion(cache_addr, candidate_size)};
+
+ // If none are found, we are done. we just load the surface and create it.
+ if (overlaps.empty()) {
+ return InitializeSurface(gpu_addr, params, preserve_contents);
+ }
+
+ // Step 3
+ // Now we need to figure the relationship between the texture and its overlaps
+ // we do a topological test to ensure we can find some relationship. If it fails
+ // immediately recycle the texture
+ for (const auto& surface : overlaps) {
+ const auto topological_result = surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ }
+
+ // Split cases between 1 overlap or many.
+ if (overlaps.size() == 1) {
+ TSurface current_surface = overlaps[0];
+ // First check if the surface is within the overlap. If not, it means
+ // two things either the candidate surface is a supertexture of the overlap
+ // or they don't match in any known way.
+ if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) {
+ if (current_surface->GetGpuAddr() == gpu_addr) {
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ // Now we check if the candidate is a mipmap/layer of the overlap
+ std::optional<TView> view =
+ current_surface->EmplaceView(params, gpu_addr, candidate_size);
+ if (view) {
+ const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+ if (is_mirage) {
+ // On a mirage view, we need to recreate the surface under this new view
+ // and then obtain a view again.
+ SurfaceParams new_params = current_surface->GetSurfaceParams();
+ const u32 wh = SurfaceParams::ConvertWidth(
+ new_params.width, new_params.pixel_format, params.pixel_format);
+ const u32 hh = SurfaceParams::ConvertHeight(
+ new_params.height, new_params.pixel_format, params.pixel_format);
+ new_params.width = wh;
+ new_params.height = hh;
+ new_params.pixel_format = params.pixel_format;
+ std::pair<TSurface, TView> pair =
+ RebuildSurface(current_surface, new_params, is_render);
+ std::optional<TView> mirage_view =
+ pair.first->EmplaceView(params, gpu_addr, candidate_size);
+ if (mirage_view)
+ return {pair.first, *mirage_view};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ return {current_surface, *view};
+ }
+ // The next case is unsafe, so if we are in accurate GPU emulation, just skip it
+ if (Settings::values.use_accurate_gpu_emulation) {
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ // This is the case the texture is a part of the parent.
+ if (current_surface->MatchesSubTexture(params, gpu_addr)) {
+ return RebuildSurface(current_surface, params, is_render);
+ }
+ } else {
+ // If there are many overlaps, odds are they are subtextures of the candidate
+ // surface. We try to construct a new surface based on the candidate parameters,
+ // using the overlaps. If a single overlap fails, this will fail.
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ // We failed all the tests, recycle the overlaps into a new texture.
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+
+ /// Creates a brand new surface, registers it in the cache and, when requested,
+ /// fills it with the current guest memory contents.
+ std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents) {
+ TSurface surface = GetUncachedSurface(gpu_addr, params);
+ Register(surface);
+ if (preserve_contents) {
+ // The guest expects the previous memory contents to be visible in the texture.
+ LoadSurface(surface);
+ }
+ return {surface, surface->GetMainView()};
+ }
+
+ /// Uploads a surface's guest data to the host texture through the staging buffer.
+ void LoadSurface(const TSurface& surface) {
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ staging_buffer.resize(surface->GetHostSizeInBytes());
+ surface->LoadBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->UploadTexture(staging_buffer);
+ // Freshly uploaded data matches guest memory, so clear the modified flag.
+ surface->MarkAsModified(false, Tick());
+ }
+
+ /// Downloads a modified surface back to guest memory; no-op when untouched.
+ void FlushSurface(const TSurface& surface) {
+ if (!surface->IsModified()) {
+ // Nothing to write back.
+ return;
+ }
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ staging_buffer.resize(surface->GetHostSizeInBytes());
+ surface->DownloadTexture(staging_buffer);
+ surface->FlushBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->MarkAsModified(false, Tick());
+ }
+
+ /// Inserts the surface into the L1 cache and into every registry bucket its
+ /// address range spans.
+ void RegisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ const CacheAddr first_page = cache_addr >> registry_page_bits;
+ const CacheAddr last_page = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache[cache_addr] = surface;
+ for (CacheAddr page = first_page; page <= last_page; ++page) {
+ registry[page].push_back(surface);
+ }
+ }
+
+ /// Removes the surface from the L1 cache and from every registry bucket its
+ /// address range spans.
+ void UnregisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ const CacheAddr first_page = cache_addr >> registry_page_bits;
+ const CacheAddr last_page = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache.erase(cache_addr);
+ for (CacheAddr page = first_page; page <= last_page; ++page) {
+ auto& bucket = registry[page];
+ // A registered surface is always present in each bucket it spans.
+ bucket.erase(std::find(bucket.begin(), bucket.end(), surface));
+ }
+ }
+
+ /// Collects every registered surface overlapping [cache_addr, cache_addr + size).
+ /// The picked flag deduplicates surfaces that appear in several registry buckets.
+ std::vector<TSurface> GetSurfacesInRegion(const CacheAddr cache_addr, const std::size_t size) {
+ if (size == 0) {
+ return {};
+ }
+ const CacheAddr cache_addr_end = cache_addr + size;
+ const CacheAddr first_page = cache_addr >> registry_page_bits;
+ const CacheAddr last_page = (cache_addr_end - 1) >> registry_page_bits;
+ std::vector<TSurface> surfaces;
+ for (CacheAddr page = first_page; page <= last_page; ++page) {
+ for (auto& surface : registry[page]) {
+ if (surface->IsPicked() || !surface->Overlaps(cache_addr, cache_addr_end)) {
+ continue;
+ }
+ surface->MarkAsPicked(true);
+ surfaces.push_back(surface);
+ }
+ }
+ // Reset the deduplication flag for the next query.
+ for (auto& surface : surfaces) {
+ surface->MarkAsPicked(false);
+ }
+ return surfaces;
+ }
+
+ /// Stashes an unregistered surface so it can be reused later by a surface with
+ /// identical parameters instead of being recreated.
+ void ReserveSurface(const SurfaceParams& params, TSurface surface) {
+ auto& reserve_list = surface_reserve[params];
+ reserve_list.push_back(std::move(surface));
+ }
+
+ /// Returns a previously reserved, currently unregistered surface matching the
+ /// given parameters, or a null handle when none is available.
+ TSurface TryGetReservedSurface(const SurfaceParams& params) {
+ const auto search = surface_reserve.find(params);
+ if (search == surface_reserve.end()) {
+ return {};
+ }
+ auto& candidates = search->second;
+ const auto it = std::find_if(candidates.begin(), candidates.end(),
+ [](const TSurface& surface) { return !surface->IsRegistered(); });
+ return it != candidates.end() ? *it : TSurface{};
+ }
+
+ /// Looks up the format that can safely substitute `format` on a non-rendering
+ /// conflict, as defined by the siblings table.
+ constexpr PixelFormat GetSiblingFormat(PixelFormat format) const {
+ return siblings_table[static_cast<std::size_t>(format)];
+ }
+
+ /// Pairs a framebuffer attachment surface with the view bound to it.
+ struct FramebufferTargetInfo {
+ TSurface target;
+ TView view;
+ };
+
+ VideoCore::RasterizerInterface& rasterizer;
+
+ // Monotonic counter incremented by Tick(); used to timestamp modifications.
+ u64 ticks{};
+
+ // Guards the cache for protection conflicts.
+ bool guard_render_targets{};
+ bool guard_samplers{};
+
+ // The siblings table is for formats that can be interchanged with one another
+ // without causing issues. This is only valid when a conflict occurs on a non
+ // rendering use.
+ std::array<PixelFormat, static_cast<std::size_t>(PixelFormat::Max)> siblings_table;
+
+ // The internal Cache is different for the Texture Cache. It's based on buckets
+ // of 1MB. This fits better for the purpose of this cache as textures are normally
+ // large in size.
+ static constexpr u64 registry_page_bits{20};
+ static constexpr u64 registry_page_size{1 << registry_page_bits};
+ std::unordered_map<CacheAddr, std::vector<TSurface>> registry;
+
+ // The L1 Cache is used for fast texture lookup before checking the overlaps.
+ // This avoids calculating sizes and other data.
+ std::unordered_map<CacheAddr, TSurface> l1_cache;
+
+ /// The surface reserve is a "backup" cache, this is where we put unique surfaces that have
+ /// previously been used. This is to prevent surfaces from being constantly created and
+ /// destroyed when used with different surface parameters.
+ std::unordered_map<SurfaceParams, std::vector<TSurface>> surface_reserve;
+
+ // Currently bound color render targets and depth buffer.
+ std::array<FramebufferTargetInfo, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
+ render_targets;
+ FramebufferTargetInfo depth_buffer;
+
+ // NOTE(review): appears to track surfaces bound for sampling between draws;
+ // confirm against the sampler lookup paths.
+ std::vector<TSurface> sampled_textures;
+
+ // Shared staging buffer used by LoadSurface/FlushSurface for host<->guest copies.
+ StagingCache staging_cache;
+ // Recursive because cache operations can re-enter through surface management.
+ std::recursive_mutex mutex;
+};
+
+} // namespace VideoCommon