-rwxr-xr-x  .travis/linux-mingw/build.sh | 5
-rw-r--r--  CMakeLists.txt | 28
-rw-r--r--  CMakeModules/GenerateSCMRev.cmake | 1
-rw-r--r--  src/audio_core/stream.cpp | 2
-rw-r--r--  src/common/CMakeLists.txt | 2
-rw-r--r--  src/common/alignment.h | 6
-rw-r--r--  src/common/binary_find.h | 21
-rw-r--r--  src/common/bit_util.h | 44
-rw-r--r--  src/common/common_funcs.h | 1
-rw-r--r--  src/core/CMakeLists.txt | 6
-rw-r--r--  src/core/core.cpp | 4
-rw-r--r--  src/core/core_cpu.cpp | 4
-rw-r--r--  src/core/core_timing.cpp | 34
-rw-r--r--  src/core/core_timing.h | 23
-rw-r--r--  src/core/file_sys/nca_metadata.h | 6
-rw-r--r--  src/core/file_sys/registered_cache.cpp | 9
-rw-r--r--  src/core/file_sys/submission_package.cpp | 11
-rw-r--r--  src/core/hle/kernel/process.cpp | 135
-rw-r--r--  src/core/hle/kernel/process.h | 12
-rw-r--r--  src/core/hle/kernel/svc.cpp | 20
-rw-r--r--  src/core/hle/kernel/thread.cpp | 10
-rw-r--r--  src/core/hle/kernel/thread.h | 16
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 76
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 51
-rw-r--r--  src/core/hle/service/acc/acc.cpp | 73
-rw-r--r--  src/core/hle/service/acc/acc.h | 24
-rw-r--r--  src/core/hle/service/acc/acc_u0.cpp | 4
-rw-r--r--  src/core/hle/service/acc/errors.h | 14
-rw-r--r--  src/core/hle/service/am/am.cpp | 26
-rw-r--r--  src/core/hle/service/am/am.h | 1
-rw-r--r--  src/core/hle/service/am/applets/web_browser.cpp | 4
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 19
-rw-r--r--  src/core/hle/service/filesystem/filesystem.cpp | 10
-rw-r--r--  src/core/hle/service/filesystem/filesystem.h | 2
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 54
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.h | 26
-rw-r--r--  src/core/hle/service/friend/errors.h | 12
-rw-r--r--  src/core/hle/service/friend/friend.cpp | 115
-rw-r--r--  src/core/hle/service/friend/friend.h | 1
-rw-r--r--  src/core/hle/service/friend/interface.cpp | 2
-rw-r--r--  src/core/hle/service/service.cpp | 7
-rw-r--r--  src/core/hle/service/service.h | 3
-rw-r--r--  src/core/hle/service/set/set.cpp | 10
-rw-r--r--  src/core/hle/service/set/set.h | 1
-rw-r--r--  src/core/hle/service/time/interface.cpp | 11
-rw-r--r--  src/core/hle/service/time/interface.h | 5
-rw-r--r--  src/core/hle/service/time/time.cpp | 115
-rw-r--r--  src/core/hle/service/time/time.h | 11
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.cpp | 68
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.h | 74
-rw-r--r--  src/core/loader/nsp.cpp | 3
-rw-r--r--  src/core/loader/xci.cpp | 2
-rw-r--r--  src/core/memory.cpp | 2
-rw-r--r--  src/core/memory.h | 4
-rw-r--r--  src/core/reporter.cpp | 60
-rw-r--r--  src/core/reporter.h | 15
-rw-r--r--  src/core/settings.cpp | 2
-rw-r--r--  src/core/settings.h | 3
-rw-r--r--  src/core/telemetry_session.cpp | 2
-rw-r--r--  src/core/tools/freezer.cpp | 188
-rw-r--r--  src/core/tools/freezer.h | 82
-rw-r--r--  src/tests/core/core_timing.cpp | 20
-rw-r--r--  src/video_core/CMakeLists.txt | 16
-rw-r--r--  src/video_core/engines/engine_upload.cpp | 6
-rw-r--r--  src/video_core/engines/engine_upload.h | 6
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 25
-rw-r--r--  src/video_core/engines/fermi_2d.h | 53
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 12
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 2
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 4
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 83
-rw-r--r--  src/video_core/memory_manager.cpp | 5
-rw-r--r--  src/video_core/rasterizer_cache.h | 3
-rw-r--r--  src/video_core/rasterizer_interface.h | 7
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_framebuffer_cache.cpp | 75
-rw-r--r--  src/video_core/renderer_opengl/gl_framebuffer_cache.h | 68
-rw-r--r--  src/video_core/renderer_opengl/gl_global_cache.cpp | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 178
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 26
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 1362
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h | 572
-rw-r--r--  src/video_core/renderer_opengl/gl_resource_manager.cpp | 24
-rw-r--r--  src/video_core/renderer_opengl/gl_resource_manager.h | 28
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 167
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 46
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 116
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.h | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_disk_cache.cpp | 38
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_disk_cache.h | 76
-rw-r--r--  src/video_core/renderer_opengl/gl_stream_buffer.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_stream_buffer.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp | 614
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h | 143
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 1
-rw-r--r--  src/video_core/renderer_opengl/utils.cpp | 4
-rw-r--r--  src/video_core/renderer_opengl/utils.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 7
-rw-r--r--  src/video_core/shader/decode.cpp | 1
-rw-r--r--  src/video_core/shader/decode/decode_integer_set.cpp | 0
-rw-r--r--  src/video_core/shader/decode/image.cpp | 120
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 45
-rw-r--r--  src/video_core/shader/node.h | 52
-rw-r--r--  src/video_core/shader/shader_ir.h | 14
-rw-r--r--  src/video_core/surface.cpp | 8
-rw-r--r--  src/video_core/surface.h | 225
-rw-r--r--  src/video_core/texture_cache.cpp | 386
-rw-r--r--  src/video_core/texture_cache.h | 586
-rw-r--r--  src/video_core/texture_cache/copy_params.h | 36
-rw-r--r--  src/video_core/texture_cache/surface_base.cpp | 300
-rw-r--r--  src/video_core/texture_cache/surface_base.h | 317
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 334
-rw-r--r--  src/video_core/texture_cache/surface_params.h | 286
-rw-r--r--  src/video_core/texture_cache/surface_view.cpp | 23
-rw-r--r--  src/video_core/texture_cache/surface_view.h | 67
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 814
-rw-r--r--  src/video_core/textures/convert.cpp | 14
-rw-r--r--  src/video_core/textures/convert.h | 7
-rw-r--r--  src/video_core/textures/decoders.cpp | 42
-rw-r--r--  src/video_core/textures/decoders.h | 4
-rw-r--r--  src/video_core/textures/texture.h | 31
-rw-r--r--  src/yuzu/configuration/config.cpp | 7
-rw-r--r--  src/yuzu/configuration/configure_debug.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_debug.ui | 18
-rw-r--r--  src/yuzu/configuration/configure_general.cpp | 5
-rw-r--r--  src/yuzu/configuration/configure_general.ui | 20
-rw-r--r--  src/yuzu/main.cpp | 3
-rw-r--r--  src/yuzu_cmd/config.cpp | 3
-rw-r--r--  src/yuzu_cmd/default_ini.h | 5
-rw-r--r--  src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp | 4
-rw-r--r--  src/yuzu_tester/config.cpp | 2
-rw-r--r--  src/yuzu_tester/default_ini.h | 2
132 files changed, 5506 insertions, 3661 deletions
diff --git a/.travis/linux-mingw/build.sh b/.travis/linux-mingw/build.sh
index c32a909d3..b12d70b12 100755
--- a/.travis/linux-mingw/build.sh
+++ b/.travis/linux-mingw/build.sh
@@ -1,4 +1,3 @@
#!/bin/bash -ex
-
-mkdir -p "$HOME/.ccache"
-docker run -e ENABLE_COMPATIBILITY_REPORTING --env-file .travis/common/travis-ci.env -v $(pwd):/yuzu -v "$HOME/.ccache":/root/.ccache yuzuemu/build-environments:linux-mingw /bin/bash /yuzu/.travis/linux-mingw/docker.sh
+mkdir "$HOME/.ccache" || true
+docker run --env-file .travis/common/travis-ci.env -v $(pwd):/yuzu -v "$HOME/.ccache":/root/.ccache yuzuemu/build-environments:linux-mingw /bin/bash -ex /yuzu/.travis/linux-mingw/docker.sh
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9a207f9e3..bfa104034 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,18 +7,6 @@ include(CMakeDependentOption)
project(yuzu)
-# Get Git submodule dependencies
-find_package(Git QUIET)
-if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
- execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- RESULT_VARIABLE GIT_SUBMOD_RESULT)
- if(NOT GIT_SUBMOD_RESULT EQUAL "0")
- message(FATAL_ERROR "git submodule update --init --recursive failed with ${GIT_SUBMOD_RESULT}, "
- "please checkout submodules manually with \"git submodule update --init --recursive\"")
- endif()
-endif()
-
# Set bundled sdl2/qt as dependent options.
# OFF by default, but if ENABLE_SDL2 and MSVC are true then ON
option(ENABLE_SDL2 "Enable the SDL2 frontend" ON)
@@ -45,6 +33,22 @@ if(NOT EXISTS ${PROJECT_SOURCE_DIR}/.git/hooks/pre-commit)
DESTINATION ${PROJECT_SOURCE_DIR}/.git/hooks)
endif()
+# Sanity check : Check that all submodules are present
+# =======================================================================
+
+function(check_submodules_present)
+ file(READ "${PROJECT_SOURCE_DIR}/.gitmodules" gitmodules)
+ string(REGEX MATCHALL "path *= *[^ \t\r\n]*" gitmodules ${gitmodules})
+ foreach(module ${gitmodules})
+ string(REGEX REPLACE "path *= *" "" module ${module})
+ if (NOT EXISTS "${PROJECT_SOURCE_DIR}/${module}/.git")
+ message(FATAL_ERROR "Git submodule ${module} not found. "
+ "Please run: git submodule update --init --recursive")
+ endif()
+ endforeach()
+endfunction()
+check_submodules_present()
+
configure_file(${PROJECT_SOURCE_DIR}/dist/compatibility_list/compatibility_list.qrc
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc
COPYONLY)
diff --git a/CMakeModules/GenerateSCMRev.cmake b/CMakeModules/GenerateSCMRev.cmake
index 31edeb63d..dd65cfe42 100644
--- a/CMakeModules/GenerateSCMRev.cmake
+++ b/CMakeModules/GenerateSCMRev.cmake
@@ -70,6 +70,7 @@ set(HASH_FILES
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
+ "${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
diff --git a/src/audio_core/stream.cpp b/src/audio_core/stream.cpp
index 982c7af2f..6a5f53a57 100644
--- a/src/audio_core/stream.cpp
+++ b/src/audio_core/stream.cpp
@@ -105,7 +105,7 @@ void Stream::PlayNextBuffer() {
sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
- core_timing.ScheduleEventThreadsafe(GetBufferReleaseCycles(*active_buffer), release_event, {});
+ core_timing.ScheduleEvent(GetBufferReleaseCycles(*active_buffer), release_event, {});
}
void Stream::ReleaseActiveBuffer() {
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 198b3fe07..2554add28 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -44,6 +44,7 @@ add_custom_command(OUTPUT scm_rev.cpp
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
+ "${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
@@ -74,6 +75,7 @@ add_library(common STATIC
assert.h
detached_tasks.cpp
detached_tasks.h
+ binary_find.h
bit_field.h
bit_util.h
cityhash.cpp
diff --git a/src/common/alignment.h b/src/common/alignment.h
index d94a2291f..617b14d9b 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
@@ -20,6 +20,12 @@ constexpr T AlignDown(T value, std::size_t size) {
}
template <typename T>
+constexpr T AlignBits(T value, std::size_t align) {
+ static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
+ return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align);
+}
+
+template <typename T>
constexpr bool Is4KBAligned(T value) {
static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
return (value & 0xFFF) == 0;
diff --git a/src/common/binary_find.h b/src/common/binary_find.h
new file mode 100644
index 000000000..5cc523bf9
--- /dev/null
+++ b/src/common/binary_find.h
@@ -0,0 +1,21 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+
+namespace Common {
+
+template <class ForwardIt, class T, class Compare = std::less<>>
+ForwardIt BinaryFind(ForwardIt first, ForwardIt last, const T& value, Compare comp = {}) {
+ // Note: BOTH type T and the type after ForwardIt is dereferenced
+ // must be implicitly convertible to BOTH Type1 and Type2, used in Compare.
+ // This is stricter than lower_bound requirement (see above)
+
+ first = std::lower_bound(first, last, value, comp);
+ return first != last && !comp(value, *first) ? first : last;
+}
+
+} // namespace Common
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index d032df413..6f7d5a947 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -97,4 +97,48 @@ inline u32 CountTrailingZeroes64(u64 value) {
}
#endif
+#ifdef _MSC_VER
+
+inline u32 MostSignificantBit32(const u32 value) {
+ unsigned long result;
+ _BitScanReverse(&result, value);
+ return static_cast<u32>(result);
+}
+
+inline u32 MostSignificantBit64(const u64 value) {
+ unsigned long result;
+ _BitScanReverse64(&result, value);
+ return static_cast<u32>(result);
+}
+
+#else
+
+inline u32 MostSignificantBit32(const u32 value) {
+ return 31U - static_cast<u32>(__builtin_clz(value));
+}
+
+inline u32 MostSignificantBit64(const u64 value) {
+ return 63U - static_cast<u32>(__builtin_clzll(value));
+}
+
+#endif
+
+inline u32 Log2Floor32(const u32 value) {
+ return MostSignificantBit32(value);
+}
+
+inline u32 Log2Ceil32(const u32 value) {
+ const u32 log2_f = Log2Floor32(value);
+ return log2_f + ((value ^ (1U << log2_f)) != 0U);
+}
+
+inline u32 Log2Floor64(const u64 value) {
+ return MostSignificantBit64(value);
+}
+
+inline u32 Log2Ceil64(const u64 value) {
+ const u64 log2_f = static_cast<u64>(Log2Floor64(value));
+ return static_cast<u32>(log2_f + ((value ^ (1ULL << log2_f)) != 0ULL));
+}
+
} // namespace Common
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 8b0d34da6..04ecac959 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -4,6 +4,7 @@
#pragma once
+#include <algorithm>
#include <string>
#if !defined(ARCHITECTURE_x86_64)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0a922430f..2f33b2090 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -175,6 +175,7 @@ add_library(core STATIC
hle/service/acc/acc_u0.h
hle/service/acc/acc_u1.cpp
hle/service/acc/acc_u1.h
+ hle/service/acc/errors.h
hle/service/acc/profile_manager.cpp
hle/service/acc/profile_manager.h
hle/service/am/am.cpp
@@ -272,6 +273,7 @@ add_library(core STATIC
hle/service/filesystem/fsp_srv.h
hle/service/fgm/fgm.cpp
hle/service/fgm/fgm.h
+ hle/service/friend/errors.h
hle/service/friend/friend.cpp
hle/service/friend/friend.h
hle/service/friend/interface.cpp
@@ -431,6 +433,8 @@ add_library(core STATIC
hle/service/time/interface.h
hle/service/time/time.cpp
hle/service/time/time.h
+ hle/service/time/time_sharedmemory.cpp
+ hle/service/time/time_sharedmemory.h
hle/service/usb/usb.cpp
hle/service/usb/usb.h
hle/service/vi/display/vi_display.cpp
@@ -478,6 +482,8 @@ add_library(core STATIC
settings.h
telemetry_session.cpp
telemetry_session.h
+ tools/freezer.cpp
+ tools/freezer.h
)
create_target_directory_groups(core)
diff --git a/src/core/core.cpp b/src/core/core.cpp
index fa6fc357d..4aceee785 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -34,6 +34,7 @@
#include "core/reporter.h"
#include "core/settings.h"
#include "core/telemetry_session.h"
+#include "core/tools/freezer.h"
#include "file_sys/cheat_engine.h"
#include "file_sys/patch_manager.h"
#include "video_core/debug_utils/debug_utils.h"
@@ -143,7 +144,7 @@ struct System::Impl {
telemetry_session = std::make_unique<Core::TelemetrySession>();
service_manager = std::make_shared<Service::SM::ServiceManager>();
- Service::Init(service_manager, system, *virtual_filesystem);
+ Service::Init(service_manager, system);
GDBStub::Init();
renderer = VideoCore::CreateRenderer(emu_window, system);
@@ -301,6 +302,7 @@ struct System::Impl {
bool is_powered_on = false;
std::unique_ptr<FileSys::CheatEngine> cheat_engine;
+ std::unique_ptr<Tools::Freezer> memory_freezer;
/// Frontend applets
Service::AM::Applets::AppletManager applet_manager;
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index ba63c3e61..99b7d387d 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -53,7 +53,7 @@ bool CpuBarrier::Rendezvous() {
Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier,
std::size_t core_index)
: cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} {
- if (Settings::values.use_cpu_jit) {
+ if (Settings::values.cpu_jit_enabled) {
#ifdef ARCHITECTURE_x86_64
arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index);
#else
@@ -70,7 +70,7 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba
Cpu::~Cpu() = default;
std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) {
- if (Settings::values.use_cpu_jit) {
+ if (Settings::values.cpu_jit_enabled) {
#ifdef ARCHITECTURE_x86_64
return std::make_unique<DynarmicExclusiveMonitor>(num_cores);
#else
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 41adb2302..a58f7b131 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -56,12 +56,12 @@ void CoreTiming::Initialize() {
}
void CoreTiming::Shutdown() {
- MoveEvents();
ClearPendingEvents();
UnregisterAllEvents();
}
EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
+ std::lock_guard guard{inner_mutex};
// check for existing type with same name.
// we want event type names to remain unique so that we can use them for serialization.
ASSERT_MSG(event_types.find(name) == event_types.end(),
@@ -82,6 +82,7 @@ void CoreTiming::UnregisterAllEvents() {
void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
ASSERT(event_type != nullptr);
+ std::lock_guard guard{inner_mutex};
const s64 timeout = GetTicks() + cycles_into_future;
// If this event needs to be scheduled before the next advance(), force one early
@@ -93,12 +94,8 @@ void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_ty
std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
}
-void CoreTiming::ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
- u64 userdata) {
- ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
-}
-
void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
+ std::lock_guard guard{inner_mutex};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
return e.type == event_type && e.userdata == userdata;
});
@@ -110,10 +107,6 @@ void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
}
}
-void CoreTiming::UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) {
- unschedule_queue.Push(std::make_pair(event_type, userdata));
-}
-
u64 CoreTiming::GetTicks() const {
u64 ticks = static_cast<u64>(global_timer);
if (!is_global_timer_sane) {
@@ -135,6 +128,7 @@ void CoreTiming::ClearPendingEvents() {
}
void CoreTiming::RemoveEvent(const EventType* event_type) {
+ std::lock_guard guard{inner_mutex};
const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
[&](const Event& e) { return e.type == event_type; });
@@ -145,11 +139,6 @@ void CoreTiming::RemoveEvent(const EventType* event_type) {
}
}
-void CoreTiming::RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
- MoveEvents();
- RemoveEvent(event_type);
-}
-
void CoreTiming::ForceExceptionCheck(s64 cycles) {
cycles = std::max<s64>(0, cycles);
if (downcount <= cycles) {
@@ -162,19 +151,8 @@ void CoreTiming::ForceExceptionCheck(s64 cycles) {
downcount = static_cast<int>(cycles);
}
-void CoreTiming::MoveEvents() {
- for (Event ev; ts_queue.Pop(ev);) {
- ev.fifo_order = event_fifo_id++;
- event_queue.emplace_back(std::move(ev));
- std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
- }
-}
-
void CoreTiming::Advance() {
- MoveEvents();
- for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) {
- UnscheduleEvent(ev.first, ev.second);
- }
+ std::unique_lock<std::mutex> guard(inner_mutex);
const int cycles_executed = slice_length - downcount;
global_timer += cycles_executed;
@@ -186,7 +164,9 @@ void CoreTiming::Advance() {
Event evt = std::move(event_queue.front());
std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
event_queue.pop_back();
+ inner_mutex.unlock();
evt.type->callback(evt.userdata, global_timer - evt.time);
+ inner_mutex.lock();
}
is_global_timer_sane = false;
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 9d2efde37..161c7007d 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -6,6 +6,7 @@
#include <chrono>
#include <functional>
+#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
@@ -67,7 +68,7 @@ public:
///
EventType* RegisterEvent(const std::string& name, TimedCallback callback);
- /// Unregisters all registered events thus far.
+ /// Unregisters all registered events thus far. Note: not thread safe
void UnregisterAllEvents();
/// After the first Advance, the slice lengths and the downcount will be reduced whenever an
@@ -76,20 +77,10 @@ public:
/// Scheduling from a callback will not update the downcount until the Advance() completes.
void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata = 0);
- /// This is to be called when outside of hle threads, such as the graphics thread, wants to
- /// schedule things to be executed on the main thread.
- ///
- /// @note This doesn't change slice_length and thus events scheduled by this might be
- /// called with a delay of up to MAX_SLICE_LENGTH
- void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
- u64 userdata = 0);
-
void UnscheduleEvent(const EventType* event_type, u64 userdata);
- void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata);
/// We only permit one event of each type in the queue at a time.
void RemoveEvent(const EventType* event_type);
- void RemoveNormalAndThreadsafeEvent(const EventType* event_type);
void ForceExceptionCheck(s64 cycles);
@@ -120,7 +111,6 @@ private:
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
- void MoveEvents();
s64 global_timer = 0;
s64 idled_cycles = 0;
@@ -143,14 +133,9 @@ private:
// remain stable regardless of rehashes/resizing.
std::unordered_map<std::string, EventType> event_types;
- // The queue for storing the events from other threads threadsafe until they will be added
- // to the event_queue by the emu thread
- Common::MPSCQueue<Event> ts_queue;
-
- // The queue for unscheduling the events from other threads threadsafe
- Common::MPSCQueue<std::pair<const EventType*, u64>> unschedule_queue;
-
EventType* ev_lost = nullptr;
+
+ std::mutex inner_mutex;
};
} // namespace Core::Timing
diff --git a/src/core/file_sys/nca_metadata.h b/src/core/file_sys/nca_metadata.h
index 84d5cd1e0..1f82fff0a 100644
--- a/src/core/file_sys/nca_metadata.h
+++ b/src/core/file_sys/nca_metadata.h
@@ -35,9 +35,9 @@ enum class ContentRecordType : u8 {
Program = 1,
Data = 2,
Control = 3,
- Manual = 4,
- Legal = 5,
- Patch = 6,
+ HtmlDocument = 4,
+ LegalInformation = 5,
+ DeltaFragment = 6,
};
struct ContentRecord {
diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp
index 4608490e0..3725b10f7 100644
--- a/src/core/file_sys/registered_cache.cpp
+++ b/src/core/file_sys/registered_cache.cpp
@@ -99,7 +99,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
return ContentRecordType::Data;
case NCAContentType::Manual:
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
- return ContentRecordType::Manual;
+ return ContentRecordType::HtmlDocument;
default:
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast<u8>(type));
}
@@ -397,8 +397,8 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex
});
if (meta_iter == ncas.end()) {
- LOG_ERROR(Loader, "The XCI you are attempting to install does not have a metadata NCA and "
- "is therefore malformed. Double check your encryption keys.");
+ LOG_ERROR(Loader, "The file you are attempting to install does not have a metadata NCA and "
+ "is therefore malformed. Check your encryption keys.");
return InstallResult::ErrorMetaFailed;
}
@@ -415,6 +415,9 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex
const auto cnmt_file = section0->GetFiles()[0];
const CNMT cnmt(cnmt_file);
for (const auto& record : cnmt.GetContentRecords()) {
+ // Ignore DeltaFragments, they are not useful to us
+ if (record.type == ContentRecordType::DeltaFragment)
+ continue;
const auto nca = GetNCAFromNSPForID(nsp, record.nca_id);
if (nca == nullptr)
return InstallResult::ErrorCopyFailed;
diff --git a/src/core/file_sys/submission_package.cpp b/src/core/file_sys/submission_package.cpp
index d0428a457..8b3b14e25 100644
--- a/src/core/file_sys/submission_package.cpp
+++ b/src/core/file_sys/submission_package.cpp
@@ -248,10 +248,13 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
auto next_file = pfs->GetFile(fmt::format("{}.nca", id_string));
if (next_file == nullptr) {
- LOG_WARNING(Service_FS,
- "NCA with ID {}.nca is listed in content metadata, but cannot "
- "be found in PFS. NSP appears to be corrupted.",
- id_string);
+ if (rec.type != ContentRecordType::DeltaFragment) {
+ LOG_WARNING(Service_FS,
+ "NCA with ID {}.nca is listed in content metadata, but cannot "
+ "be found in PFS. NSP appears to be corrupted.",
+ id_string);
+ }
+
continue;
}
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 7cfc513a1..f45ef05f6 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <bitset>
#include <memory>
#include <random>
#include "common/alignment.h"
@@ -48,8 +49,58 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
}
} // Anonymous namespace
-SharedPtr<Process> Process::Create(Core::System& system, std::string name,
- Process::ProcessType type) {
+// Represents a page used for thread-local storage.
+//
+// Each TLS page contains slots that may be used by processes and threads.
+// Every process and thread is created with a slot in some arbitrary page
+// (whichever page happens to have an available slot).
+class TLSPage {
+public:
+ static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE;
+
+ explicit TLSPage(VAddr address) : base_address{address} {}
+
+ bool HasAvailableSlots() const {
+ return !is_slot_used.all();
+ }
+
+ VAddr GetBaseAddress() const {
+ return base_address;
+ }
+
+ std::optional<VAddr> ReserveSlot() {
+ for (std::size_t i = 0; i < is_slot_used.size(); i++) {
+ if (is_slot_used[i]) {
+ continue;
+ }
+
+ is_slot_used[i] = true;
+ return base_address + (i * Memory::TLS_ENTRY_SIZE);
+ }
+
+ return std::nullopt;
+ }
+
+ void ReleaseSlot(VAddr address) {
+ // Ensure that all given addresses are consistent with how TLS pages
+ // are intended to be used when releasing slots.
+ ASSERT(IsWithinPage(address));
+ ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0);
+
+ const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE;
+ is_slot_used[index] = false;
+ }
+
+private:
+ bool IsWithinPage(VAddr address) const {
+ return base_address <= address && address < base_address + Memory::PAGE_SIZE;
+ }
+
+ VAddr base_address;
+ std::bitset<num_slot_entries> is_slot_used;
+};
+
+SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
auto& kernel = system.Kernel();
SharedPtr<Process> process(new Process(system));
@@ -181,61 +232,55 @@ void Process::PrepareForTermination() {
}
/**
- * Finds a free location for the TLS section of a thread.
- * @param tls_slots The TLS page array of the thread's owner process.
- * Returns a tuple of (page, slot, alloc_needed) where:
- * page: The index of the first allocated TLS page that has free slots.
- * slot: The index of the first free slot in the indicated page.
- * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
+ * Attempts to find a TLS page that contains a free slot for
+ * use by a thread.
+ *
+ * @returns If a page with an available slot is found, then an iterator
+ * pointing to the page is returned. Otherwise the end iterator
+ * is returned instead.
*/
-static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
- const std::vector<std::bitset<8>>& tls_slots) {
- // Iterate over all the allocated pages, and try to find one where not all slots are used.
- for (std::size_t page = 0; page < tls_slots.size(); ++page) {
- const auto& page_tls_slots = tls_slots[page];
- if (!page_tls_slots.all()) {
- // We found a page with at least one free slot, find which slot it is
- for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
- if (!page_tls_slots.test(slot)) {
- return std::make_tuple(page, slot, false);
- }
- }
- }
- }
-
- return std::make_tuple(0, 0, true);
+static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
+ return std::find_if(tls_pages.begin(), tls_pages.end(),
+ [](const auto& page) { return page.HasAvailableSlots(); });
}
-VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
- auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
- const VAddr tls_begin = vm_manager.GetTLSIORegionBaseAddress();
+VAddr Process::CreateTLSRegion() {
+ auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages);
- if (needs_allocation) {
- tls_slots.emplace_back(0); // The page is completely available at the start
- available_page = tls_slots.size() - 1;
- available_slot = 0; // Use the first slot in the new page
+ if (tls_page_iter == tls_pages.cend()) {
+ const auto region_address =
+ vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(),
+ vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE);
+ ASSERT(region_address.Succeeded());
- // Allocate some memory from the end of the linear heap for this region.
- auto& tls_memory = thread.GetTLSMemory();
- tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
+ const auto map_result = vm_manager.MapMemoryBlock(
+ *region_address, std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE), 0,
+ Memory::PAGE_SIZE, MemoryState::ThreadLocal);
+ ASSERT(map_result.Succeeded());
- vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
+ tls_pages.emplace_back(*region_address);
- vm_manager.MapMemoryBlock(tls_begin + available_page * Memory::PAGE_SIZE, tls_memory, 0,
- Memory::PAGE_SIZE, MemoryState::ThreadLocal);
- }
+ const auto reserve_result = tls_pages.back().ReserveSlot();
+ ASSERT(reserve_result.has_value());
- tls_slots[available_page].set(available_slot);
+ return *reserve_result;
+ }
- return tls_begin + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
+ return *tls_page_iter->ReserveSlot();
}
-void Process::FreeTLSSlot(VAddr tls_address) {
- const VAddr tls_base = tls_address - vm_manager.GetTLSIORegionBaseAddress();
- const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
- const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+void Process::FreeTLSRegion(VAddr tls_address) {
+ const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE);
+ auto iter =
+ std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
+ return page.GetBaseAddress() == aligned_address;
+ });
+
+ // Something has gone very wrong if we're freeing a region
+ // with no actual page available.
+ ASSERT(iter != tls_pages.cend());
- tls_slots[tls_page].reset(tls_slot);
+ iter->ReleaseSlot(tls_address);
}
void Process::LoadModule(CodeSet module_, VAddr base_addr) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 248fd3840..83ea02bee 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -5,7 +5,6 @@
#pragma once
#include <array>
-#include <bitset>
#include <cstddef>
#include <list>
#include <string>
@@ -32,6 +31,7 @@ namespace Kernel {
class KernelCore;
class ResourceLimit;
class Thread;
+class TLSPage;
struct CodeSet;
@@ -260,10 +260,10 @@ public:
// Thread-local storage management
// Marks the next available region as used and returns the address of the slot.
- VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread);
+ [[nodiscard]] VAddr CreateTLSRegion();
// Frees a used TLS slot identified by the given address
- void FreeTLSSlot(VAddr tls_address);
+ void FreeTLSRegion(VAddr tls_address);
private:
explicit Process(Core::System& system);
@@ -290,7 +290,7 @@ private:
u64 code_memory_size = 0;
/// Current status of the process
- ProcessStatus status;
+ ProcessStatus status{};
/// The ID of this process
u64 process_id = 0;
@@ -310,7 +310,7 @@ private:
/// holds the TLS for a specific thread. This vector contains which parts are in use for each
/// page as a bitmask.
/// This vector will grow as more pages are allocated for new threads.
- std::vector<std::bitset<8>> tls_slots;
+ std::vector<TLSPage> tls_pages;
/// Contains the parsed process capability descriptors.
ProcessCapabilities capabilities;
@@ -339,7 +339,7 @@ private:
Mutex mutex;
/// Random values for svcGetInfo RandomEntropy
- std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
+ std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<const Thread*> thread_list;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index de6363ff2..332573a95 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -98,9 +98,9 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return ERR_INVALID_ADDRESS_STATE;
}
- if (!vm_manager.IsWithinNewMapRegion(dst_addr, size)) {
+ if (!vm_manager.IsWithinStackRegion(dst_addr, size)) {
LOG_ERROR(Kernel_SVC,
- "Destination is not within the new map region, addr=0x{:016X}, size=0x{:016X}",
+ "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}",
dst_addr, size);
return ERR_INVALID_MEMORY_RANGE;
}
@@ -726,8 +726,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
// 2.0.0+
ASLRRegionBaseAddr = 12,
ASLRRegionSize = 13,
- NewMapRegionBaseAddr = 14,
- NewMapRegionSize = 15,
+ StackRegionBaseAddr = 14,
+ StackRegionSize = 15,
// 3.0.0+
IsVirtualAddressMemoryEnabled = 16,
PersonalMmHeapUsage = 17,
@@ -752,8 +752,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
case GetInfoType::HeapRegionSize:
case GetInfoType::ASLRRegionBaseAddr:
case GetInfoType::ASLRRegionSize:
- case GetInfoType::NewMapRegionBaseAddr:
- case GetInfoType::NewMapRegionSize:
+ case GetInfoType::StackRegionBaseAddr:
+ case GetInfoType::StackRegionSize:
case GetInfoType::TotalPhysicalMemoryAvailable:
case GetInfoType::TotalPhysicalMemoryUsed:
case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -806,12 +806,12 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
*result = process->VMManager().GetASLRRegionSize();
return RESULT_SUCCESS;
- case GetInfoType::NewMapRegionBaseAddr:
- *result = process->VMManager().GetNewMapRegionBaseAddress();
+ case GetInfoType::StackRegionBaseAddr:
+ *result = process->VMManager().GetStackRegionBaseAddress();
return RESULT_SUCCESS;
- case GetInfoType::NewMapRegionSize:
- *result = process->VMManager().GetNewMapRegionSize();
+ case GetInfoType::StackRegionSize:
+ *result = process->VMManager().GetStackRegionSize();
return RESULT_SUCCESS;
case GetInfoType::TotalPhysicalMemoryAvailable:
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index c73a40977..ec529e7f2 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -65,7 +65,7 @@ void Thread::Stop() {
owner_process->UnregisterThread(this);
// Mark the TLS slot in the thread's page as free.
- owner_process->FreeTLSSlot(tls_address);
+ owner_process->FreeTLSRegion(tls_address);
}
void Thread::WakeAfterDelay(s64 nanoseconds) {
@@ -76,13 +76,13 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
// This function might be called from any thread so we have to be cautious and use the
// thread-safe version of ScheduleEvent.
const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds});
- Core::System::GetInstance().CoreTiming().ScheduleEventThreadsafe(
+ Core::System::GetInstance().CoreTiming().ScheduleEvent(
cycles, kernel.ThreadWakeupCallbackEventType(), callback_handle);
}
void Thread::CancelWakeupTimer() {
- Core::System::GetInstance().CoreTiming().UnscheduleEventThreadsafe(
- kernel.ThreadWakeupCallbackEventType(), callback_handle);
+ Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(),
+ callback_handle);
}
static std::optional<s32> GetNextProcessorId(u64 mask) {
@@ -205,9 +205,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
+ thread->tls_address = thread->owner_process->CreateTLSRegion();
thread->scheduler = &system.Scheduler(processor_id);
thread->scheduler->AddThread(thread);
- thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
thread->owner_process->RegisterThread(thread.get());
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index b4b9cda7c..07e989637 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -5,7 +5,6 @@
#pragma once
#include <functional>
-#include <memory>
#include <string>
#include <vector>
@@ -78,9 +77,6 @@ enum class ThreadActivity : u32 {
class Thread final : public WaitObject {
public:
- using TLSMemory = std::vector<u8>;
- using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
-
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
using ThreadContext = Core::ARM_Interface::ThreadContext;
@@ -169,14 +165,6 @@ public:
return thread_id;
}
- TLSMemoryPtr& GetTLSMemory() {
- return tls_memory;
- }
-
- const TLSMemoryPtr& GetTLSMemory() const {
- return tls_memory;
- }
-
/// Resumes a thread from waiting
void ResumeFromWait();
@@ -463,11 +451,9 @@ private:
u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};
- TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
+ ThreadActivity activity = ThreadActivity::Normal;
std::string name;
-
- ThreadActivity activity = ThreadActivity::Normal;
};
/**
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index c929c2a52..501544090 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -152,22 +152,33 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
}
ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
- // Find the first Free VMA.
- const VAddr base = GetASLRRegionBaseAddress();
- const VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
- if (vma.second.type != VMAType::Free)
- return false;
+ return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
+}
- const VAddr vma_end = vma.second.base + vma.second.size;
- return vma_end > base && vma_end >= base + size;
- });
+ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
+ ASSERT(begin < end);
+ ASSERT(size <= end - begin);
- if (vma_handle == vma_map.end()) {
+ const VMAHandle vma_handle =
+ std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
+ if (vma.second.type != VMAType::Free) {
+ return false;
+ }
+ const VAddr vma_base = vma.second.base;
+ const VAddr vma_end = vma_base + vma.second.size;
+ const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
+ const VAddr used_range = assumed_base + size;
+
+ return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
+ used_range <= vma_end;
+ });
+
+ if (vma_handle == vma_map.cend()) {
// TODO(Subv): Find the correct error code here.
return ResultCode(-1);
}
- const VAddr target = std::max(base, vma_handle->second.base);
+ const VAddr target = std::max(begin, vma_handle->second.base);
return MakeResult<VAddr>(target);
}
@@ -614,9 +625,11 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
u64 map_region_size = 0;
u64 heap_region_size = 0;
- u64 new_map_region_size = 0;
+ u64 stack_region_size = 0;
u64 tls_io_region_size = 0;
+ u64 stack_and_tls_io_end = 0;
+
switch (type) {
case FileSys::ProgramAddressSpaceType::Is32Bit:
case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
@@ -632,6 +645,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
map_region_size = 0;
heap_region_size = 0x80000000;
}
+ stack_and_tls_io_end = 0x40000000;
break;
case FileSys::ProgramAddressSpaceType::Is36Bit:
address_space_width = 36;
@@ -641,6 +655,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
aslr_region_end = aslr_region_base + 0xFF8000000;
map_region_size = 0x180000000;
heap_region_size = 0x180000000;
+ stack_and_tls_io_end = 0x80000000;
break;
case FileSys::ProgramAddressSpaceType::Is39Bit:
address_space_width = 39;
@@ -650,7 +665,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
aslr_region_end = aslr_region_base + 0x7FF8000000;
map_region_size = 0x1000000000;
heap_region_size = 0x180000000;
- new_map_region_size = 0x80000000;
+ stack_region_size = 0x80000000;
tls_io_region_size = 0x1000000000;
break;
default:
@@ -658,6 +673,8 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
return;
}
+ const u64 stack_and_tls_io_begin = aslr_region_base;
+
address_space_base = 0;
address_space_end = 1ULL << address_space_width;
@@ -668,15 +685,20 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
heap_region_end = heap_region_base + heap_region_size;
heap_end = heap_region_base;
- new_map_region_base = heap_region_end;
- new_map_region_end = new_map_region_base + new_map_region_size;
+ stack_region_base = heap_region_end;
+ stack_region_end = stack_region_base + stack_region_size;
- tls_io_region_base = new_map_region_end;
+ tls_io_region_base = stack_region_end;
tls_io_region_end = tls_io_region_base + tls_io_region_size;
- if (new_map_region_size == 0) {
- new_map_region_base = address_space_base;
- new_map_region_end = address_space_end;
+ if (stack_region_size == 0) {
+ stack_region_base = stack_and_tls_io_begin;
+ stack_region_end = stack_and_tls_io_end;
+ }
+
+ if (tls_io_region_size == 0) {
+ tls_io_region_base = stack_and_tls_io_begin;
+ tls_io_region_end = stack_and_tls_io_end;
}
}
@@ -868,21 +890,21 @@ bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}
-VAddr VMManager::GetNewMapRegionBaseAddress() const {
- return new_map_region_base;
+VAddr VMManager::GetStackRegionBaseAddress() const {
+ return stack_region_base;
}
-VAddr VMManager::GetNewMapRegionEndAddress() const {
- return new_map_region_end;
+VAddr VMManager::GetStackRegionEndAddress() const {
+ return stack_region_end;
}
-u64 VMManager::GetNewMapRegionSize() const {
- return new_map_region_end - new_map_region_base;
+u64 VMManager::GetStackRegionSize() const {
+ return stack_region_end - stack_region_base;
}
-bool VMManager::IsWithinNewMapRegion(VAddr address, u64 size) const {
- return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(),
- GetNewMapRegionEndAddress());
+bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const {
+ return IsInsideAddressRange(address, size, GetStackRegionBaseAddress(),
+ GetStackRegionEndAddress());
}
VAddr VMManager::GetTLSIORegionBaseAddress() const {
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index dfbf7a894..9fe6ac3f4 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -362,14 +362,39 @@ public:
ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
/**
- * Finds the first free address that can hold a region of the desired size.
+ * Finds the first free memory region of the given size within
+ * the user-addressable ASLR memory region.
*
- * @param size Size of the desired region.
- * @return The found free address.
+ * @param size The size of the desired region in bytes.
+ *
+ * @returns If successful, the base address of the free region with
+ * the given size.
*/
ResultVal<VAddr> FindFreeRegion(u64 size) const;
/**
+ * Finds the first free address range that can hold a region of the desired size
+ *
+ * @param begin The starting address of the range.
+ * This is treated as an inclusive beginning address.
+ *
+ * @param end The ending address of the range.
+ * This is treated as an exclusive ending address.
+ *
+ * @param size The size of the free region to attempt to locate,
+ * in bytes.
+ *
+ * @returns If successful, the base address of the free region with
+ * the given size.
+ *
+ * @returns If unsuccessful, a result containing an error code.
+ *
+ * @pre The starting address must be less than the ending address.
+ * @pre The size must not exceed the address range itself.
+ */
+ ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const;
+
+ /**
* Maps a memory-mapped IO region at a given address.
*
* @param target The guest address to start the mapping at.
@@ -571,17 +596,17 @@ public:
/// Determines whether or not the specified range is within the map region.
bool IsWithinMapRegion(VAddr address, u64 size) const;
- /// Gets the base address of the new map region.
- VAddr GetNewMapRegionBaseAddress() const;
+ /// Gets the base address of the stack region.
+ VAddr GetStackRegionBaseAddress() const;
- /// Gets the end address of the new map region.
- VAddr GetNewMapRegionEndAddress() const;
+ /// Gets the end address of the stack region.
+ VAddr GetStackRegionEndAddress() const;
- /// Gets the total size of the new map region in bytes.
- u64 GetNewMapRegionSize() const;
+ /// Gets the total size of the stack region in bytes.
+ u64 GetStackRegionSize() const;
- /// Determines whether or not the given address range is within the new map region
- bool IsWithinNewMapRegion(VAddr address, u64 size) const;
+ /// Determines whether or not the given address range is within the stack region
+ bool IsWithinStackRegion(VAddr address, u64 size) const;
/// Gets the base address of the TLS IO region.
VAddr GetTLSIORegionBaseAddress() const;
@@ -701,8 +726,8 @@ private:
VAddr map_region_base = 0;
VAddr map_region_end = 0;
- VAddr new_map_region_base = 0;
- VAddr new_map_region_end = 0;
+ VAddr stack_region_base = 0;
+ VAddr stack_region_end = 0;
VAddr tls_io_region_base = 0;
VAddr tls_io_region_end = 0;
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 0cd8158df..c01ee3eda 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -15,13 +15,18 @@
#include "core/file_sys/control_metadata.h"
#include "core/file_sys/patch_manager.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
#include "core/hle/service/acc/acc.h"
#include "core/hle/service/acc/acc_aa.h"
#include "core/hle/service/acc/acc_su.h"
#include "core/hle/service/acc/acc_u0.h"
#include "core/hle/service/acc/acc_u1.h"
+#include "core/hle/service/acc/errors.h"
#include "core/hle/service/acc/profile_manager.h"
+#include "core/hle/service/glue/arp.h"
+#include "core/hle/service/glue/manager.h"
+#include "core/hle/service/sm/sm.h"
#include "core/loader/loader.h"
namespace Service::Account {
@@ -217,10 +222,72 @@ void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestCon
rb.Push(profile_manager->CanSystemRegisterUser());
}
-void Module::Interface::InitializeApplicationInfoOld(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_ACC, "(STUBBED) called");
+void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ auto pid = rp.Pop<u64>();
+
+ LOG_DEBUG(Service_ACC, "called, process_id={}", pid);
IPC::ResponseBuilder rb{ctx, 2};
- rb.Push(RESULT_SUCCESS);
+ rb.Push(InitializeApplicationInfoBase(pid));
+}
+
+void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ auto pid = rp.Pop<u64>();
+
+ LOG_WARNING(Service_ACC, "(Partial implementation) called, process_id={}", pid);
+
+ // TODO(ogniK): We require checking if the user actually owns the title and what not. As of
+ // currently, we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called
+ // first then we do extra checks if the game is a digital copy.
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(InitializeApplicationInfoBase(pid));
+}
+
+ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
+ if (application_info) {
+ LOG_ERROR(Service_ACC, "Application already initialized");
+ return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
+ }
+
+ const auto& list = system.Kernel().GetProcessList();
+ const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
+ return process->GetProcessID() == process_id;
+ });
+
+ if (iter == list.end()) {
+ LOG_ERROR(Service_ACC, "Failed to find process ID");
+ application_info.application_type = ApplicationType::Unknown;
+
+ return ERR_ACCOUNTINFO_BAD_APPLICATION;
+ }
+
+ const auto launch_property = system.GetARPManager().GetLaunchProperty((*iter)->GetTitleID());
+
+ if (launch_property.Failed()) {
+ LOG_ERROR(Service_ACC, "Failed to get launch property");
+ return ERR_ACCOUNTINFO_BAD_APPLICATION;
+ }
+
+ switch (launch_property->base_game_storage_id) {
+ case FileSys::StorageId::GameCard:
+ application_info.application_type = ApplicationType::GameCard;
+ break;
+ case FileSys::StorageId::Host:
+ case FileSys::StorageId::NandUser:
+ case FileSys::StorageId::SdCard:
+ application_info.application_type = ApplicationType::Digital;
+ break;
+ default:
+ LOG_ERROR(Service_ACC, "Invalid game storage ID");
+ return ERR_ACCOUNTINFO_BAD_APPLICATION;
+ }
+
+ LOG_WARNING(Service_ACC, "ApplicationInfo init required");
+ // TODO(ogniK): Actual initialization here
+
+ return RESULT_SUCCESS;
}
void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 350f123a0..f651773b7 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -4,6 +4,7 @@
#pragma once
+#include "core/hle/service/glue/manager.h"
#include "core/hle/service/service.h"
namespace Service::Account {
@@ -25,12 +26,33 @@ public:
void ListOpenUsers(Kernel::HLERequestContext& ctx);
void GetLastOpenedUser(Kernel::HLERequestContext& ctx);
void GetProfile(Kernel::HLERequestContext& ctx);
- void InitializeApplicationInfoOld(Kernel::HLERequestContext& ctx);
+ void InitializeApplicationInfo(Kernel::HLERequestContext& ctx);
+ void InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx);
void GetBaasAccountManagerForApplication(Kernel::HLERequestContext& ctx);
void IsUserRegistrationRequestPermitted(Kernel::HLERequestContext& ctx);
void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
+ private:
+ ResultCode InitializeApplicationInfoBase(u64 process_id);
+
+ enum class ApplicationType : u32_le {
+ GameCard = 0,
+ Digital = 1,
+ Unknown = 3,
+ };
+
+ struct ApplicationInfo {
+ Service::Glue::ApplicationLaunchProperty launch_property;
+ ApplicationType application_type;
+
+ constexpr explicit operator bool() const {
+ return launch_property.title_id != 0x0;
+ }
+ };
+
+ ApplicationInfo application_info{};
+
protected:
std::shared_ptr<Module> module;
std::shared_ptr<ProfileManager> profile_manager;
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index 2f239e8c0..0ac19f4ff 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -22,7 +22,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{51, &ACC_U0::TrySelectUserWithoutInteraction, "TrySelectUserWithoutInteraction"},
{60, nullptr, "ListOpenContextStoredUsers"},
{99, nullptr, "DebugActivateOpenContextRetention"},
- {100, &ACC_U0::InitializeApplicationInfoOld, "InitializeApplicationInfoOld"},
+ {100, &ACC_U0::InitializeApplicationInfo, "InitializeApplicationInfo"},
{101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"},
{102, nullptr, "AuthenticateApplicationAsync"},
{103, nullptr, "CheckNetworkServiceAvailabilityAsync"},
@@ -31,7 +31,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
{120, nullptr, "CreateGuestLoginRequest"},
{130, nullptr, "LoadOpenContext"},
{131, nullptr, "ListOpenContextStoredUsers"},
- {140, nullptr, "InitializeApplicationInfo"},
+ {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"},
{141, nullptr, "ListQualifiedUsers"},
{150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"},
};
diff --git a/src/core/hle/service/acc/errors.h b/src/core/hle/service/acc/errors.h
new file mode 100644
index 000000000..1f0577239
--- /dev/null
+++ b/src/core/hle/service/acc/errors.h
@@ -0,0 +1,14 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace Service::Account {
+
+constexpr ResultCode ERR_ACCOUNTINFO_BAD_APPLICATION{ErrorModule::Account, 22};
+constexpr ResultCode ERR_ACCOUNTINFO_ALREADY_INITIALIZED{ErrorModule::Account, 41};
+
+} // namespace Service::Account
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 4b5baf283..9fdcf2965 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -271,7 +271,7 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
{70, nullptr, "ReportMultimediaError"},
{71, nullptr, "GetCurrentIlluminanceEx"},
{80, nullptr, "SetWirelessPriorityMode"},
- {90, nullptr, "GetAccumulatedSuspendedTickValue"},
+ {90, &ISelfController::GetAccumulatedSuspendedTickValue, "GetAccumulatedSuspendedTickValue"},
{91, &ISelfController::GetAccumulatedSuspendedTickChangedEvent, "GetAccumulatedSuspendedTickChangedEvent"},
{100, nullptr, "SetAlbumImageTakenNotificationEnabled"},
{1000, nullptr, "GetDebugStorageChannel"},
@@ -284,10 +284,14 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
launchable_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Manual,
"ISelfController:LaunchableEvent");
- // TODO(ogniK): Figure out where, when and why this event gets signalled
+ // This event is created by AM on the first time GetAccumulatedSuspendedTickChangedEvent() is
+ // called. Yuzu can just create it unconditionally, since it doesn't need to support multiple
+ // ISelfControllers. The event is signaled on creation, and on transition from suspended -> not
+ // suspended if the event has previously been created by a call to
+ // GetAccumulatedSuspendedTickChangedEvent.
accumulated_suspended_tick_changed_event = Kernel::WritableEvent::CreateEventPair(
kernel, Kernel::ResetType::Manual, "ISelfController:AccumulatedSuspendedTickChangedEvent");
- accumulated_suspended_tick_changed_event.writable->Signal(); // Is signalled on creation
+ accumulated_suspended_tick_changed_event.writable->Signal();
}
ISelfController::~ISelfController() = default;
@@ -450,11 +454,19 @@ void ISelfController::GetIdleTimeDetectionExtension(Kernel::HLERequestContext& c
rb.Push<u32>(idle_time_detection_extension);
}
+void ISelfController::GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_AM, "called.");
+
+ // This command returns the total number of system ticks since ISelfController creation
+ // where the game was suspended. Since Yuzu doesn't implement game suspension, this command
+ // can just always return 0 ticks.
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push<u64>(0);
+}
+
void ISelfController::GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx) {
- // The implementation of this function is fine as is, the reason we're labelling it as stubbed
- // is because we're currently unsure when and where accumulated_suspended_tick_changed_event is
- // actually signalled for the time being.
- LOG_WARNING(Service_AM, "(STUBBED) called");
+ LOG_DEBUG(Service_AM, "called.");
IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
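For context on the stub above: GetAccumulatedSuspendedTickValue is expected to report how many system ticks the application has spent suspended since the ISelfController was created. A minimal sketch of how such a counter could be kept if suspension were ever modelled; the class and hook names below are hypothetical and not part of this change:

// Hypothetical helper; yuzu does not model suspension, so the real handler pushes 0.
struct SuspendTickAccumulator {
    void OnSuspend(u64 now_tick) {
        suspend_start = now_tick;
    }
    void OnResume(u64 now_tick) {
        accumulated += now_tick - suspend_start;
        // This is also where accumulated_suspended_tick_changed_event would be signalled,
        // matching the suspended -> not suspended transition described in the constructor.
    }
    u64 Total() const {
        return accumulated;
    }
    u64 suspend_start = 0;
    u64 accumulated = 0;
};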
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 88d1ba27c..14b010164 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -133,6 +133,7 @@ private:
void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
+ void GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx);
void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx);
std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
diff --git a/src/core/hle/service/am/applets/web_browser.cpp b/src/core/hle/service/am/applets/web_browser.cpp
index 2762e0653..f3c9fef0e 100644
--- a/src/core/hle/service/am/applets/web_browser.cpp
+++ b/src/core/hle/service/am/applets/web_browser.cpp
@@ -459,10 +459,10 @@ void WebBrowser::InitializeOffline() {
case OfflineWebSource::OfflineHtmlPage:
// While there is an AppID TLV field, in official SW this is always ignored.
title_id = 0;
- type = FileSys::ContentRecordType::Manual;
+ type = FileSys::ContentRecordType::HtmlDocument;
break;
case OfflineWebSource::ApplicationLegalInformation:
- type = FileSys::ContentRecordType::Legal;
+ type = FileSys::ContentRecordType::LegalInformation;
break;
case OfflineWebSource::SystemDataPage:
type = FileSys::ContentRecordType::Data;
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 75db0c2dc..3711e1ea1 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -167,13 +167,12 @@ public:
{3, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceName"},
{4, &IAudioDevice::QueryAudioDeviceSystemEvent, "QueryAudioDeviceSystemEvent"},
{5, &IAudioDevice::GetActiveChannelCount, "GetActiveChannelCount"},
- {6, &IAudioDevice::ListAudioDeviceName,
- "ListAudioDeviceNameAuto"}, // TODO(ogniK): Confirm if autos are identical to non auto
+ {6, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceNameAuto"},
{7, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolumeAuto"},
{8, nullptr, "GetAudioDeviceOutputVolumeAuto"},
{10, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceNameAuto"},
{11, nullptr, "QueryAudioDeviceInputEvent"},
- {12, nullptr, "QueryAudioDeviceOutputEvent"},
+ {12, &IAudioDevice::QueryAudioDeviceOutputEvent, "QueryAudioDeviceOutputEvent"},
{13, nullptr, "GetAudioSystemMasterVolumeSetting"},
};
RegisterHandlers(functions);
@@ -181,6 +180,11 @@ public:
auto& kernel = Core::System::GetInstance().Kernel();
buffer_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic,
"IAudioOutBufferReleasedEvent");
+
+ // This event should only be signalled when the audio output device changes, for example
+ // when switching from speakers to a headset.
+ audio_output_device_switch_event = Kernel::WritableEvent::CreateEventPair(
+ kernel, Kernel::ResetType::Automatic, "IAudioDevice:AudioOutputDeviceSwitchedEvent");
}
private:
@@ -237,7 +241,16 @@ private:
rb.Push<u32>(1);
}
+ void QueryAudioDeviceOutputEvent(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Audio, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushCopyObjects(audio_output_device_switch_event.readable);
+ }
+
Kernel::EventPair buffer_event;
+ Kernel::EventPair audio_output_device_switch_event;
}; // namespace Audio
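Note that nothing in this change ever signals audio_output_device_switch_event; it is only created and handed out. If output-device switching were emulated, the backend code that swaps the active sink would be the natural place to signal it. A hedged sketch, with a hypothetical helper name:

// Hypothetical helper, called when the active output changes (e.g. speakers -> headset),
// so that a guest waiting on the handle from QueryAudioDeviceOutputEvent wakes up.
void OnOutputDeviceChanged(Kernel::EventPair& device_switch_event) {
    device_switch_event.writable->Signal();
}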
diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp
index 1ebfeb4bf..8ce110dd1 100644
--- a/src/core/hle/service/filesystem/filesystem.cpp
+++ b/src/core/hle/service/filesystem/filesystem.cpp
@@ -472,12 +472,12 @@ void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite) {
}
}
-void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs) {
+void InstallInterfaces(Core::System& system) {
romfs_factory = nullptr;
- CreateFactories(vfs, false);
- std::make_shared<FSP_LDR>()->InstallAsService(service_manager);
- std::make_shared<FSP_PR>()->InstallAsService(service_manager);
- std::make_shared<FSP_SRV>()->InstallAsService(service_manager);
+ CreateFactories(*system.GetFilesystem(), false);
+ std::make_shared<FSP_LDR>()->InstallAsService(system.ServiceManager());
+ std::make_shared<FSP_PR>()->InstallAsService(system.ServiceManager());
+ std::make_shared<FSP_SRV>(system.GetReporter())->InstallAsService(system.ServiceManager());
}
} // namespace Service::FileSystem
diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h
index 6481f237c..3849dd89e 100644
--- a/src/core/hle/service/filesystem/filesystem.h
+++ b/src/core/hle/service/filesystem/filesystem.h
@@ -65,7 +65,7 @@ FileSys::VirtualDir GetModificationDumpRoot(u64 title_id);
// above is called.
void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite = true);
-void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs);
+void InstallInterfaces(Core::System& system);
// A class that wraps a VfsDirectory with methods that return ResultVal and ResultCode instead of
// pointers and booleans. This makes using a VfsDirectory with switch services much easier and
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index e7df8fd98..d3cd46a9b 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -26,6 +26,7 @@
#include "core/hle/kernel/process.h"
#include "core/hle/service/filesystem/filesystem.h"
#include "core/hle/service/filesystem/fsp_srv.h"
+#include "core/reporter.h"
namespace Service::FileSystem {
@@ -613,7 +614,7 @@ private:
u64 next_entry_index = 0;
};
-FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") {
+FSP_SRV::FSP_SRV(const Core::Reporter& reporter) : ServiceFramework("fsp-srv"), reporter(reporter) {
// clang-format off
static const FunctionInfo functions[] = {
{0, nullptr, "OpenFileSystem"},
@@ -710,14 +711,14 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") {
{1001, nullptr, "SetSaveDataSize"},
{1002, nullptr, "SetSaveDataRootPath"},
{1003, nullptr, "DisableAutoSaveDataCreation"},
- {1004, nullptr, "SetGlobalAccessLogMode"},
+ {1004, &FSP_SRV::SetGlobalAccessLogMode, "SetGlobalAccessLogMode"},
{1005, &FSP_SRV::GetGlobalAccessLogMode, "GetGlobalAccessLogMode"},
- {1006, nullptr, "OutputAccessLogToSdCard"},
+ {1006, &FSP_SRV::OutputAccessLogToSdCard, "OutputAccessLogToSdCard"},
{1007, nullptr, "RegisterUpdatePartition"},
{1008, nullptr, "OpenRegisteredUpdatePartition"},
{1009, nullptr, "GetAndClearMemoryReportInfo"},
{1010, nullptr, "SetDataStorageRedirectTarget"},
- {1011, nullptr, "OutputAccessLogToSdCard2"},
+ {1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"},
{1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"},
{1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"},
{1200, nullptr, "OpenMultiCommitManager"},
@@ -814,21 +815,22 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext&
rb.PushIpcInterface<ISaveDataInfoReader>(std::make_shared<ISaveDataInfoReader>(space));
}
-void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
- LOG_WARNING(Service_FS, "(STUBBED) called");
+void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ log_mode = rp.PopEnum<LogMode>();
- enum class LogMode : u32 {
- Off,
- Log,
- RedirectToSdCard,
- LogToSdCard = Log | RedirectToSdCard,
- };
+ LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode));
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
+void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_FS, "called");
- // Given we always want to receive logging information,
- // we always specify logging as enabled.
IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
- rb.PushEnum(LogMode::Log);
+ rb.PushEnum(log_mode);
}
void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) {
@@ -902,4 +904,26 @@ void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ct
rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND);
}
+void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) {
+ const auto raw = ctx.ReadBuffer();
+ auto log = Common::StringFromFixedZeroTerminatedBuffer(
+ reinterpret_cast<const char*>(raw.data()), raw.size());
+
+ LOG_DEBUG(Service_FS, "called, log='{}'", log);
+
+ reporter.SaveFilesystemAccessReport(log_mode, std::move(log));
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
+void FSP_SRV::GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_FS, "called");
+
+ IPC::ResponseBuilder rb{ctx, 4};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushEnum(AccessLogVersion::Latest);
+ rb.Push(access_log_program_index);
+}
+
} // namespace Service::FileSystem
diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h
index d7572ba7a..b5486a193 100644
--- a/src/core/hle/service/filesystem/fsp_srv.h
+++ b/src/core/hle/service/filesystem/fsp_srv.h
@@ -7,15 +7,32 @@
#include <memory>
#include "core/hle/service/service.h"
+namespace Core {
+class Reporter;
+}
+
namespace FileSys {
class FileSystemBackend;
}
namespace Service::FileSystem {
+enum class AccessLogVersion : u32 {
+ V7_0_0 = 2,
+
+ Latest = V7_0_0,
+};
+
+enum class LogMode : u32 {
+ Off,
+ Log,
+ RedirectToSdCard,
+ LogToSdCard = Log | RedirectToSdCard,
+};
+
class FSP_SRV final : public ServiceFramework<FSP_SRV> {
public:
- explicit FSP_SRV();
+ explicit FSP_SRV(const Core::Reporter& reporter);
~FSP_SRV() override;
private:
@@ -26,13 +43,20 @@ private:
void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx);
void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx);
+ void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx);
void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx);
void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx);
+ void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx);
+ void GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx);
FileSys::VirtualFile romfs;
u64 current_process_id = 0;
+ u32 access_log_program_index = 0;
+ LogMode log_mode = LogMode::LogToSdCard;
+
+ const Core::Reporter& reporter;
};
} // namespace Service::FileSystem
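LogMode behaves like a pair of flags (Log = 1, RedirectToSdCard = 2, with LogToSdCard = Log | RedirectToSdCard). The handlers above only store and echo the raw value, but a sketch of how the redirect bit could be tested is shown below; the helper is illustrative, not part of this change:

// Illustrative only: test the RedirectToSdCard bit of a LogMode value.
bool ShouldRedirectToSdCard(LogMode mode) {
    return (static_cast<u32>(mode) & static_cast<u32>(LogMode::RedirectToSdCard)) != 0;
}

With the default log_mode of LogToSdCard, both the Log and RedirectToSdCard bits are set.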
diff --git a/src/core/hle/service/friend/errors.h b/src/core/hle/service/friend/errors.h
new file mode 100644
index 000000000..b3996e275
--- /dev/null
+++ b/src/core/hle/service/friend/errors.h
@@ -0,0 +1,12 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "core/hle/result.h"
+
+namespace Service::Friend {
+
+constexpr ResultCode ERR_NO_NOTIFICATIONS{ErrorModule::Account, 15};
+} // namespace Service::Friend
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 5100e376c..dec541f2e 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -2,8 +2,13 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <queue>
#include "common/logging/log.h"
+#include "common/uuid.h"
#include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/readable_event.h"
+#include "core/hle/kernel/writable_event.h"
+#include "core/hle/service/friend/errors.h"
#include "core/hle/service/friend/friend.h"
#include "core/hle/service/friend/interface.h"
@@ -109,6 +114,105 @@ private:
}
};
+class INotificationService final : public ServiceFramework<INotificationService> {
+public:
+ INotificationService(Common::UUID uuid) : ServiceFramework("INotificationService"), uuid(uuid) {
+ // clang-format off
+ static const FunctionInfo functions[] = {
+ {0, &INotificationService::GetEvent, "GetEvent"},
+ {1, &INotificationService::Clear, "Clear"},
+ {2, &INotificationService::Pop, "Pop"}
+ };
+ // clang-format on
+
+ RegisterHandlers(functions);
+ }
+
+private:
+ void GetEvent(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_ACC, "called");
+
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(RESULT_SUCCESS);
+
+ if (!is_event_created) {
+ auto& kernel = Core::System::GetInstance().Kernel();
+ notification_event = Kernel::WritableEvent::CreateEventPair(
+ kernel, Kernel::ResetType::Manual, "INotificationService:NotifyEvent");
+ is_event_created = true;
+ }
+ rb.PushCopyObjects(notification_event.readable);
+ }
+
+ void Clear(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_ACC, "called");
+ while (!notifications.empty()) {
+ notifications.pop();
+ }
+ std::memset(&states, 0, sizeof(States));
+
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+ }
+
+ void Pop(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_ACC, "called");
+
+ if (notifications.empty()) {
+ LOG_ERROR(Service_ACC, "No notifications in queue!");
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(ERR_NO_NOTIFICATIONS);
+ return;
+ }
+
+ const auto notification = notifications.front();
+ notifications.pop();
+
+ switch (notification.notification_type) {
+ case NotificationTypes::HasUpdatedFriendsList:
+ states.has_updated_friends = false;
+ break;
+ case NotificationTypes::HasReceivedFriendRequest:
+ states.has_received_friend_request = false;
+ break;
+ default:
+ // HOS does not appear to have an error case for an unknown notification
+ LOG_WARNING(Service_ACC, "Unknown notification {:08X}",
+ static_cast<u32>(notification.notification_type));
+ break;
+ }
+
+ IPC::ResponseBuilder rb{ctx, 6};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushRaw<SizedNotificationInfo>(notification);
+ }
+
+ enum class NotificationTypes : u32 {
+ HasUpdatedFriendsList = 0x65,
+ HasReceivedFriendRequest = 0x1
+ };
+
+ struct SizedNotificationInfo {
+ NotificationTypes notification_type;
+ INSERT_PADDING_WORDS(
+ 1); // TODO(ogniK): This doesn't seem to be used within any IPC returns as of now
+ u64_le account_id;
+ };
+ static_assert(sizeof(SizedNotificationInfo) == 0x10,
+ "SizedNotificationInfo is an incorrect size");
+
+ struct States {
+ bool has_updated_friends;
+ bool has_received_friend_request;
+ };
+
+ Common::UUID uuid;
+ bool is_event_created = false;
+ Kernel::EventPair notification_event;
+ std::queue<SizedNotificationInfo> notifications;
+ States states{};
+};
+
void Module::Interface::CreateFriendService(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
@@ -116,6 +220,17 @@ void Module::Interface::CreateFriendService(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_ACC, "called");
}
+void Module::Interface::CreateNotificationService(Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ auto uuid = rp.PopRaw<Common::UUID>();
+
+ LOG_DEBUG(Service_ACC, "called, uuid={}", uuid.Format());
+
+ IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushIpcInterface<INotificationService>(uuid);
+}
+
Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
: ServiceFramework(name), module(std::move(module)) {}
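As implemented above, nothing ever enqueues into notifications, so Pop reports ERR_NO_NOTIFICATIONS whenever the (still empty) queue is checked. If friend-list updates were emulated, a producer along the following lines could feed the queue; PushNotification is a hypothetical member function, not part of this patch:

// Hypothetical producer inside INotificationService: queue a notification and
// wake any guest waiting on the event returned by GetEvent().
void PushNotification(NotificationTypes type, u64 account_id) {
    SizedNotificationInfo info{};
    info.notification_type = type;
    info.account_id = account_id;
    notifications.push(info);

    if (type == NotificationTypes::HasUpdatedFriendsList) {
        states.has_updated_friends = true;
    } else if (type == NotificationTypes::HasReceivedFriendRequest) {
        states.has_received_friend_request = true;
    }

    if (is_event_created) {
        notification_event.writable->Signal();
    }
}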
diff --git a/src/core/hle/service/friend/friend.h b/src/core/hle/service/friend/friend.h
index e762840cb..38d05fa8e 100644
--- a/src/core/hle/service/friend/friend.h
+++ b/src/core/hle/service/friend/friend.h
@@ -16,6 +16,7 @@ public:
~Interface() override;
void CreateFriendService(Kernel::HLERequestContext& ctx);
+ void CreateNotificationService(Kernel::HLERequestContext& ctx);
protected:
std::shared_ptr<Module> module;
diff --git a/src/core/hle/service/friend/interface.cpp b/src/core/hle/service/friend/interface.cpp
index 5a6840af5..5b384f733 100644
--- a/src/core/hle/service/friend/interface.cpp
+++ b/src/core/hle/service/friend/interface.cpp
@@ -10,7 +10,7 @@ Friend::Friend(std::shared_ptr<Module> module, const char* name)
: Interface(std::move(module), name) {
static const FunctionInfo functions[] = {
{0, &Friend::CreateFriendService, "CreateFriendService"},
- {1, nullptr, "CreateNotificationService"},
+ {1, &Friend::CreateNotificationService, "CreateNotificationService"},
{2, nullptr, "CreateDaemonSuspendSessionService"},
};
RegisterHandlers(functions);
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp
index e441b3730..952c03e27 100644
--- a/src/core/hle/service/service.cpp
+++ b/src/core/hle/service/service.cpp
@@ -195,8 +195,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co
// Module interface
/// Initialize ServiceManager
-void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system,
- FileSys::VfsFilesystem& vfs) {
+void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system) {
// NVFlinger needs to be accessed by several services like Vi and AppletOE so we instantiate it
// here and pass it into the respective InstallInterfaces functions.
auto nv_flinger = std::make_shared<NVFlinger::NVFlinger>(system.CoreTiming());
@@ -218,7 +217,7 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system,
EUPLD::InstallInterfaces(*sm);
Fatal::InstallInterfaces(*sm);
FGM::InstallInterfaces(*sm);
- FileSystem::InstallInterfaces(*sm, vfs);
+ FileSystem::InstallInterfaces(system);
Friend::InstallInterfaces(*sm);
Glue::InstallInterfaces(system);
GRC::InstallInterfaces(*sm);
@@ -249,7 +248,7 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system,
Sockets::InstallInterfaces(*sm);
SPL::InstallInterfaces(*sm);
SSL::InstallInterfaces(*sm);
- Time::InstallInterfaces(*sm);
+ Time::InstallInterfaces(system);
USB::InstallInterfaces(*sm);
VI::InstallInterfaces(*sm, nv_flinger);
WLAN::InstallInterfaces(*sm);
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h
index abbfe5524..c6c4bdae5 100644
--- a/src/core/hle/service/service.h
+++ b/src/core/hle/service/service.h
@@ -182,8 +182,7 @@ private:
};
/// Initialize ServiceManager
-void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system,
- FileSys::VfsFilesystem& vfs);
+void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system);
/// Shutdown ServiceManager
void Shutdown();
diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp
index 298d85011..b54214421 100644
--- a/src/core/hle/service/set/set.cpp
+++ b/src/core/hle/service/set/set.cpp
@@ -95,6 +95,14 @@ void SET::GetAvailableLanguageCodeCount2(Kernel::HLERequestContext& ctx) {
PushResponseLanguageCode(ctx, post4_0_0_max_entries);
}
+void SET::GetQuestFlag(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_SET, "called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push(static_cast<u32>(Settings::values.quest_flag));
+}
+
void SET::GetLanguageCode(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_SET, "called {}", Settings::values.language_index);
@@ -114,7 +122,7 @@ SET::SET() : ServiceFramework("set") {
{5, &SET::GetAvailableLanguageCodes2, "GetAvailableLanguageCodes2"},
{6, &SET::GetAvailableLanguageCodeCount2, "GetAvailableLanguageCodeCount2"},
{7, nullptr, "GetKeyCodeMap"},
- {8, nullptr, "GetQuestFlag"},
+ {8, &SET::GetQuestFlag, "GetQuestFlag"},
{9, nullptr, "GetKeyCodeMap2"},
};
// clang-format on
diff --git a/src/core/hle/service/set/set.h b/src/core/hle/service/set/set.h
index 31f9cb296..b154e08aa 100644
--- a/src/core/hle/service/set/set.h
+++ b/src/core/hle/service/set/set.h
@@ -42,6 +42,7 @@ private:
void GetAvailableLanguageCodes2(Kernel::HLERequestContext& ctx);
void GetAvailableLanguageCodeCount(Kernel::HLERequestContext& ctx);
void GetAvailableLanguageCodeCount2(Kernel::HLERequestContext& ctx);
+ void GetQuestFlag(Kernel::HLERequestContext& ctx);
};
} // namespace Service::Set
diff --git a/src/core/hle/service/time/interface.cpp b/src/core/hle/service/time/interface.cpp
index 8d122ae33..1030185e0 100644
--- a/src/core/hle/service/time/interface.cpp
+++ b/src/core/hle/service/time/interface.cpp
@@ -6,8 +6,9 @@
namespace Service::Time {
-Time::Time(std::shared_ptr<Module> time, const char* name)
- : Module::Interface(std::move(time), name) {
+Time::Time(std::shared_ptr<Module> time, std::shared_ptr<SharedMemory> shared_memory,
+ const char* name)
+ : Module::Interface(std::move(time), std::move(shared_memory), name) {
// clang-format off
static const FunctionInfo functions[] = {
{0, &Time::GetStandardUserSystemClock, "GetStandardUserSystemClock"},
@@ -16,12 +17,12 @@ Time::Time(std::shared_ptr<Module> time, const char* name)
{3, &Time::GetTimeZoneService, "GetTimeZoneService"},
{4, &Time::GetStandardLocalSystemClock, "GetStandardLocalSystemClock"},
{5, nullptr, "GetEphemeralNetworkSystemClock"},
- {20, nullptr, "GetSharedMemoryNativeHandle"},
+ {20, &Time::GetSharedMemoryNativeHandle, "GetSharedMemoryNativeHandle"},
{30, nullptr, "GetStandardNetworkClockOperationEventReadableHandle"},
{31, nullptr, "GetEphemeralNetworkClockOperationEventReadableHandle"},
{50, nullptr, "SetStandardSteadyClockInternalOffset"},
- {100, nullptr, "IsStandardUserSystemClockAutomaticCorrectionEnabled"},
- {101, nullptr, "SetStandardUserSystemClockAutomaticCorrectionEnabled"},
+ {100, &Time::IsStandardUserSystemClockAutomaticCorrectionEnabled, "IsStandardUserSystemClockAutomaticCorrectionEnabled"},
+ {101, &Time::SetStandardUserSystemClockAutomaticCorrectionEnabled, "SetStandardUserSystemClockAutomaticCorrectionEnabled"},
{102, nullptr, "GetStandardUserSystemClockInitialYear"},
{200, nullptr, "IsStandardNetworkSystemClockAccuracySufficient"},
{201, nullptr, "GetStandardUserSystemClockAutomaticCorrectionUpdatedTime"},
diff --git a/src/core/hle/service/time/interface.h b/src/core/hle/service/time/interface.h
index cd6b44dec..bdf0883e2 100644
--- a/src/core/hle/service/time/interface.h
+++ b/src/core/hle/service/time/interface.h
@@ -8,9 +8,12 @@
namespace Service::Time {
+class SharedMemory;
+
class Time final : public Module::Interface {
public:
- explicit Time(std::shared_ptr<Module> time, const char* name);
+ explicit Time(std::shared_ptr<Module> time, std::shared_ptr<SharedMemory> shared_memory,
+ const char* name);
~Time() override;
};
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index 346bad80d..ae6446204 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -13,6 +13,7 @@
#include "core/hle/kernel/client_session.h"
#include "core/hle/service/time/interface.h"
#include "core/hle/service/time/time.h"
+#include "core/hle/service/time/time_sharedmemory.h"
#include "core/settings.h"
namespace Service::Time {
@@ -61,9 +62,18 @@ static u64 CalendarToPosix(const CalendarTime& calendar_time,
return static_cast<u64>(epoch_time);
}
+enum class ClockContextType {
+ StandardSteady,
+ StandardUserSystem,
+ StandardNetworkSystem,
+ StandardLocalSystem,
+};
+
class ISystemClock final : public ServiceFramework<ISystemClock> {
public:
- ISystemClock() : ServiceFramework("ISystemClock") {
+ ISystemClock(std::shared_ptr<Service::Time::SharedMemory> shared_memory,
+ ClockContextType clock_type)
+ : ServiceFramework("ISystemClock"), shared_memory(shared_memory), clock_type(clock_type) {
static const FunctionInfo functions[] = {
{0, &ISystemClock::GetCurrentTime, "GetCurrentTime"},
{1, nullptr, "SetCurrentTime"},
@@ -72,6 +82,8 @@ public:
};
RegisterHandlers(functions);
+
+ UpdateSharedMemoryContext(system_clock_context);
}
private:
@@ -87,34 +99,63 @@ private:
void GetSystemClockContext(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_Time, "(STUBBED) called");
- SystemClockContext system_clock_ontext{};
+ // TODO(ogniK): This should be updated periodically; however, since this is stubbed, we
+ // only update when a new context is requested
+ UpdateSharedMemoryContext(system_clock_context);
+
IPC::ResponseBuilder rb{ctx, (sizeof(SystemClockContext) / 4) + 2};
rb.Push(RESULT_SUCCESS);
- rb.PushRaw(system_clock_ontext);
+ rb.PushRaw(system_clock_context);
}
+
+ void UpdateSharedMemoryContext(const SystemClockContext& clock_context) {
+ switch (clock_type) {
+ case ClockContextType::StandardLocalSystem:
+ shared_memory->SetStandardLocalSystemClockContext(clock_context);
+ break;
+ case ClockContextType::StandardNetworkSystem:
+ shared_memory->SetStandardNetworkSystemClockContext(clock_context);
+ break;
+ }
+ }
+
+ SystemClockContext system_clock_context{};
+ std::shared_ptr<Service::Time::SharedMemory> shared_memory;
+ ClockContextType clock_type;
};
class ISteadyClock final : public ServiceFramework<ISteadyClock> {
public:
- ISteadyClock() : ServiceFramework("ISteadyClock") {
+ ISteadyClock(std::shared_ptr<SharedMemory> shared_memory)
+ : ServiceFramework("ISteadyClock"), shared_memory(shared_memory) {
static const FunctionInfo functions[] = {
{0, &ISteadyClock::GetCurrentTimePoint, "GetCurrentTimePoint"},
};
RegisterHandlers(functions);
+
+ shared_memory->SetStandardSteadyClockTimepoint(GetCurrentTimePoint());
}
private:
void GetCurrentTimePoint(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Time, "called");
- const auto& core_timing = Core::System::GetInstance().CoreTiming();
- const auto ms = Core::Timing::CyclesToMs(core_timing.GetTicks());
- const SteadyClockTimePoint steady_clock_time_point{static_cast<u64_le>(ms.count() / 1000),
- {}};
+ const auto time_point = GetCurrentTimePoint();
+ // TODO(ogniK): This should be updated periodically
+ shared_memory->SetStandardSteadyClockTimepoint(time_point);
+
IPC::ResponseBuilder rb{ctx, (sizeof(SteadyClockTimePoint) / 4) + 2};
rb.Push(RESULT_SUCCESS);
- rb.PushRaw(steady_clock_time_point);
+ rb.PushRaw(time_point);
}
+
+ SteadyClockTimePoint GetCurrentTimePoint() const {
+ const auto& core_timing = Core::System::GetInstance().CoreTiming();
+ const auto ms = Core::Timing::CyclesToMs(core_timing.GetTicks());
+ return {static_cast<u64_le>(ms.count() / 1000), {}};
+ }
+
+ std::shared_ptr<SharedMemory> shared_memory;
};
class ITimeZoneService final : public ServiceFramework<ITimeZoneService> {
@@ -233,7 +274,7 @@ void Module::Interface::GetStandardUserSystemClock(Kernel::HLERequestContext& ct
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>();
+ rb.PushIpcInterface<ISystemClock>(shared_memory, ClockContextType::StandardUserSystem);
}
void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext& ctx) {
@@ -241,7 +282,7 @@ void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext&
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>();
+ rb.PushIpcInterface<ISystemClock>(shared_memory, ClockContextType::StandardNetworkSystem);
}
void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) {
@@ -249,7 +290,7 @@ void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) {
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISteadyClock>();
+ rb.PushIpcInterface<ISteadyClock>(shared_memory);
}
void Module::Interface::GetTimeZoneService(Kernel::HLERequestContext& ctx) {
@@ -265,7 +306,7 @@ void Module::Interface::GetStandardLocalSystemClock(Kernel::HLERequestContext& c
IPC::ResponseBuilder rb{ctx, 2, 0, 1};
rb.Push(RESULT_SUCCESS);
- rb.PushIpcInterface<ISystemClock>();
+ rb.PushIpcInterface<ISystemClock>(shared_memory, ClockContextType::StandardLocalSystem);
}
void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
@@ -333,16 +374,52 @@ void Module::Interface::CalculateStandardUserSystemClockDifferenceByUser(
rb.PushRaw<u64>(difference);
}
-Module::Interface::Interface(std::shared_ptr<Module> time, const char* name)
- : ServiceFramework(name), time(std::move(time)) {}
+void Module::Interface::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) {
+ LOG_DEBUG(Service_Time, "called");
+ IPC::ResponseBuilder rb{ctx, 2, 1};
+ rb.Push(RESULT_SUCCESS);
+ rb.PushCopyObjects(shared_memory->GetSharedMemoryHolder());
+}
+
+void Module::Interface::IsStandardUserSystemClockAutomaticCorrectionEnabled(
+ Kernel::HLERequestContext& ctx) {
+ // TODO(ogniK): When clock contexts are implemented, the value should be read from the context
+ // instead of our shared memory holder
+ LOG_DEBUG(Service_Time, "called");
+
+ IPC::ResponseBuilder rb{ctx, 3};
+ rb.Push(RESULT_SUCCESS);
+ rb.Push<u8>(shared_memory->GetStandardUserSystemClockAutomaticCorrectionEnabled());
+}
+
+void Module::Interface::SetStandardUserSystemClockAutomaticCorrectionEnabled(
+ Kernel::HLERequestContext& ctx) {
+ IPC::RequestParser rp{ctx};
+ const auto enabled = rp.Pop<u8>();
+
+ LOG_WARNING(Service_Time, "(PARTIAL IMPLEMENTATION) called");
+
+ // TODO(ogniK): Update clock contexts and correct timespans
+
+ shared_memory->SetStandardUserSystemClockAutomaticCorrectionEnabled(enabled > 0);
+ IPC::ResponseBuilder rb{ctx, 2};
+ rb.Push(RESULT_SUCCESS);
+}
+
+Module::Interface::Interface(std::shared_ptr<Module> time,
+ std::shared_ptr<SharedMemory> shared_memory, const char* name)
+ : ServiceFramework(name), time(std::move(time)), shared_memory(std::move(shared_memory)) {}
Module::Interface::~Interface() = default;
-void InstallInterfaces(SM::ServiceManager& service_manager) {
+void InstallInterfaces(Core::System& system) {
auto time = std::make_shared<Module>();
- std::make_shared<Time>(time, "time:a")->InstallAsService(service_manager);
- std::make_shared<Time>(time, "time:s")->InstallAsService(service_manager);
- std::make_shared<Time>(time, "time:u")->InstallAsService(service_manager);
+ auto shared_mem = std::make_shared<SharedMemory>(system);
+
+ std::make_shared<Time>(time, shared_mem, "time:a")->InstallAsService(system.ServiceManager());
+ std::make_shared<Time>(time, shared_mem, "time:s")->InstallAsService(system.ServiceManager());
+ std::make_shared<Time>(std::move(time), shared_mem, "time:u")
+ ->InstallAsService(system.ServiceManager());
}
} // namespace Service::Time
diff --git a/src/core/hle/service/time/time.h b/src/core/hle/service/time/time.h
index f11affe95..e0708f856 100644
--- a/src/core/hle/service/time/time.h
+++ b/src/core/hle/service/time/time.h
@@ -10,6 +10,8 @@
namespace Service::Time {
+class SharedMemory;
+
struct LocationName {
std::array<u8, 0x24> name;
};
@@ -77,7 +79,8 @@ class Module final {
public:
class Interface : public ServiceFramework<Interface> {
public:
- explicit Interface(std::shared_ptr<Module> time, const char* name);
+ explicit Interface(std::shared_ptr<Module> time,
+ std::shared_ptr<SharedMemory> shared_memory, const char* name);
~Interface() override;
void GetStandardUserSystemClock(Kernel::HLERequestContext& ctx);
@@ -87,13 +90,17 @@ public:
void GetStandardLocalSystemClock(Kernel::HLERequestContext& ctx);
void GetClockSnapshot(Kernel::HLERequestContext& ctx);
void CalculateStandardUserSystemClockDifferenceByUser(Kernel::HLERequestContext& ctx);
+ void GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx);
+ void IsStandardUserSystemClockAutomaticCorrectionEnabled(Kernel::HLERequestContext& ctx);
+ void SetStandardUserSystemClockAutomaticCorrectionEnabled(Kernel::HLERequestContext& ctx);
protected:
std::shared_ptr<Module> time;
+ std::shared_ptr<SharedMemory> shared_memory;
};
};
/// Registers all Time services with the specified service manager.
-void InstallInterfaces(SM::ServiceManager& service_manager);
+void InstallInterfaces(Core::System& system);
} // namespace Service::Time
diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp
new file mode 100644
index 000000000..bfc81b83c
--- /dev/null
+++ b/src/core/hle/service/time/time_sharedmemory.cpp
@@ -0,0 +1,68 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/core.h"
+#include "core/hle/service/time/time_sharedmemory.h"
+
+namespace Service::Time {
+const std::size_t SHARED_MEMORY_SIZE = 0x1000;
+
+SharedMemory::SharedMemory(Core::System& system) : system(system) {
+ shared_memory_holder = Kernel::SharedMemory::Create(
+ system.Kernel(), nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite,
+ Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "Time:SharedMemory");
+
+ // The format version appears to be constant from 1.0.0 through 8.1.0. Some games check this
+ // value and crash if it is set to anything else
+ shared_memory_format.format_version = 14;
+ std::memcpy(shared_memory_holder->GetPointer(), &shared_memory_format, sizeof(Format));
+}
+
+SharedMemory::~SharedMemory() = default;
+
+Kernel::SharedPtr<Kernel::SharedMemory> SharedMemory::GetSharedMemoryHolder() const {
+ return shared_memory_holder;
+}
+
+void SharedMemory::SetStandardSteadyClockTimepoint(const SteadyClockTimePoint& timepoint) {
+ shared_memory_format.standard_steady_clock_timepoint.StoreData(
+ shared_memory_holder->GetPointer(), timepoint);
+}
+
+void SharedMemory::SetStandardLocalSystemClockContext(const SystemClockContext& context) {
+ shared_memory_format.standard_local_system_clock_context.StoreData(
+ shared_memory_holder->GetPointer(), context);
+}
+
+void SharedMemory::SetStandardNetworkSystemClockContext(const SystemClockContext& context) {
+ shared_memory_format.standard_network_system_clock_context.StoreData(
+ shared_memory_holder->GetPointer(), context);
+}
+
+void SharedMemory::SetStandardUserSystemClockAutomaticCorrectionEnabled(bool enabled) {
+ shared_memory_format.standard_user_system_clock_automatic_correction.StoreData(
+ shared_memory_holder->GetPointer(), enabled);
+}
+
+SteadyClockTimePoint SharedMemory::GetStandardSteadyClockTimepoint() {
+ return shared_memory_format.standard_steady_clock_timepoint.ReadData(
+ shared_memory_holder->GetPointer());
+}
+
+SystemClockContext SharedMemory::GetStandardLocalSystemClockContext() {
+ return shared_memory_format.standard_local_system_clock_context.ReadData(
+ shared_memory_holder->GetPointer());
+}
+
+SystemClockContext SharedMemory::GetStandardNetworkSystemClockContext() {
+ return shared_memory_format.standard_network_system_clock_context.ReadData(
+ shared_memory_holder->GetPointer());
+}
+
+bool SharedMemory::GetStandardUserSystemClockAutomaticCorrectionEnabled() {
+ return shared_memory_format.standard_user_system_clock_automatic_correction.ReadData(
+ shared_memory_holder->GetPointer());
+}
+
+} // namespace Service::Time
diff --git a/src/core/hle/service/time/time_sharedmemory.h b/src/core/hle/service/time/time_sharedmemory.h
new file mode 100644
index 000000000..cb8253541
--- /dev/null
+++ b/src/core/hle/service/time/time_sharedmemory.h
@@ -0,0 +1,74 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/hle/kernel/shared_memory.h"
+#include "core/hle/service/time/time.h"
+
+namespace Service::Time {
+class SharedMemory {
+public:
+ explicit SharedMemory(Core::System& system);
+ ~SharedMemory();
+
+ // Return the shared memory handle
+ Kernel::SharedPtr<Kernel::SharedMemory> GetSharedMemoryHolder() const;
+
+ // Write new values into the shared memory barrier fields
+ void SetStandardSteadyClockTimepoint(const SteadyClockTimePoint& timepoint);
+ void SetStandardLocalSystemClockContext(const SystemClockContext& context);
+ void SetStandardNetworkSystemClockContext(const SystemClockContext& context);
+ void SetStandardUserSystemClockAutomaticCorrectionEnabled(bool enabled);
+
+ // Read the last stored values back out of the shared memory barrier fields
+ SteadyClockTimePoint GetStandardSteadyClockTimepoint();
+ SystemClockContext GetStandardLocalSystemClockContext();
+ SystemClockContext GetStandardNetworkSystemClockContext();
+ bool GetStandardUserSystemClockAutomaticCorrectionEnabled();
+
+ // TODO(ogniK): We have to properly simulate memory barriers; how are we going to do this?
+ template <typename T, std::size_t Offset>
+ struct MemoryBarrier {
+ static_assert(std::is_trivially_constructible_v<T>, "T must be trivially constructible");
+ u32_le read_attempt{};
+ std::array<T, 2> data{};
+
+ // These are not actually memory barriers at the moment, as we don't have multicore and all
+ // HLE is mutexed. This will need to be properly implemented when we start updating the time
+ // points on threads. As of right now, we update both values synchronously and simply
+ // increment read_attempt to indicate that we waited.
+ void StoreData(u8* shared_memory, T data_to_store) {
+ std::memcpy(this, shared_memory + Offset, sizeof(*this));
+ read_attempt++;
+ data[read_attempt & 1] = data_to_store;
+ std::memcpy(shared_memory + Offset, this, sizeof(*this));
+ }
+
+ // For reading we're just going to read the last stored value. If there was no value stored
+ // it will just end up reading an empty value as intended.
+ T ReadData(u8* shared_memory) {
+ std::memcpy(this, shared_memory + Offset, sizeof(*this));
+ return data[(read_attempt - 1) & 1];
+ }
+ };
+
+ // Shared memory format
+ struct Format {
+ MemoryBarrier<SteadyClockTimePoint, 0x0> standard_steady_clock_timepoint;
+ MemoryBarrier<SystemClockContext, 0x38> standard_local_system_clock_context;
+ MemoryBarrier<SystemClockContext, 0x80> standard_network_system_clock_context;
+ MemoryBarrier<bool, 0xc8> standard_user_system_clock_automatic_correction;
+ u32_le format_version;
+ };
+ static_assert(sizeof(Format) == 0xd8, "Format is an invalid size");
+
+private:
+ Kernel::SharedPtr<Kernel::SharedMemory> shared_memory_holder{};
+ Core::System& system;
+ Format shared_memory_format{};
+};
+
+} // namespace Service::Time
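For orientation, Format mirrors the layout of the 0x1000-byte block that guests map read-only, and each MemoryBarrier's Offset template argument matches that field's position inside the block. A hedged sketch of reading a field straight out of the raw mapping; the function name is illustrative, and the pointer is assumed to come from GetSharedMemoryHolder()->GetPointer():

// Illustrative: read format_version directly from the mapped block (needs <cstring> and
// <cstddef>). Per the constructor in time_sharedmemory.cpp, this is expected to be 14.
u32 ReadFormatVersion(const u8* shared_memory_base) {
    u32 version{};
    std::memcpy(&version,
                shared_memory_base + offsetof(SharedMemory::Format, format_version),
                sizeof(version));
    return version;
}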
diff --git a/src/core/loader/nsp.cpp b/src/core/loader/nsp.cpp
index 3a22ec2c6..b1171ce65 100644
--- a/src/core/loader/nsp.cpp
+++ b/src/core/loader/nsp.cpp
@@ -168,7 +168,8 @@ ResultStatus AppLoader_NSP::ReadControlData(FileSys::NACP& nacp) {
}
ResultStatus AppLoader_NSP::ReadManualRomFS(FileSys::VirtualFile& file) {
- const auto nca = nsp->GetNCA(nsp->GetProgramTitleID(), FileSys::ContentRecordType::Manual);
+ const auto nca =
+ nsp->GetNCA(nsp->GetProgramTitleID(), FileSys::ContentRecordType::HtmlDocument);
if (nsp->GetStatus() != ResultStatus::Success || nca == nullptr)
return ResultStatus::ErrorNoRomFS;
file = nca->GetRomFS();
diff --git a/src/core/loader/xci.cpp b/src/core/loader/xci.cpp
index a5c4d3688..5e8553db9 100644
--- a/src/core/loader/xci.cpp
+++ b/src/core/loader/xci.cpp
@@ -134,7 +134,7 @@ ResultStatus AppLoader_XCI::ReadControlData(FileSys::NACP& control) {
ResultStatus AppLoader_XCI::ReadManualRomFS(FileSys::VirtualFile& file) {
const auto nca = xci->GetSecurePartitionNSP()->GetNCA(xci->GetProgramTitleID(),
- FileSys::ContentRecordType::Manual);
+ FileSys::ContentRecordType::HtmlDocument);
if (xci->GetStatus() != ResultStatus::Success || nca == nullptr)
return ResultStatus::ErrorXCIMissingPartition;
file = nca->GetRomFS();
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index f18f6226b..8555691c0 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -16,11 +16,9 @@
#include "core/core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/vm_manager.h"
-#include "core/hle/lock.h"
#include "core/memory.h"
#include "core/memory_setup.h"
#include "video_core/gpu.h"
-#include "video_core/renderer_base.h"
namespace Memory {
diff --git a/src/core/memory.h b/src/core/memory.h
index 04e2c5f1d..09008e1dd 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -8,10 +8,6 @@
#include <string>
#include "common/common_types.h"
-namespace Common {
-struct PageTable;
-}
-
namespace Kernel {
class Process;
}
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index 774022569..5d4c3e6ea 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -2,8 +2,13 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include <ctime>
#include <fstream>
+
+#include <fmt/format.h>
+#include <fmt/time.h>
#include <json.hpp>
+
#include "common/file_util.h"
#include "common/hex_util.h"
#include "common/scm_rev.h"
@@ -14,7 +19,6 @@
#include "core/hle/result.h"
#include "core/reporter.h"
#include "core/settings.h"
-#include "fmt/time.h"
namespace {
@@ -30,9 +34,11 @@ std::string GetTimestamp() {
using namespace nlohmann;
-void SaveToFile(const json& json, const std::string& filename) {
- if (!FileUtil::CreateFullPath(filename))
+void SaveToFile(json json, const std::string& filename) {
+ if (!FileUtil::CreateFullPath(filename)) {
LOG_ERROR(Core, "Failed to create path for '{}' to save report!", filename);
+ return;
+ }
std::ofstream file(
FileUtil::SanitizePath(filename, FileUtil::DirectorySeparator::PlatformDefault));
@@ -61,8 +67,11 @@ json GetReportCommonData(u64 title_id, ResultCode result, const std::string& tim
{"result_description", fmt::format("{:08X}", result.description.Value())},
{"timestamp", timestamp},
};
- if (user_id.has_value())
+
+ if (user_id.has_value()) {
out["user_id"] = fmt::format("{:016X}{:016X}", (*user_id)[1], (*user_id)[0]);
+ }
+
return out;
}
@@ -171,14 +180,14 @@ json GetHLERequestContextData(Kernel::HLERequestContext& ctx) {
out["buffer_descriptor_c"] = GetHLEBufferDescriptorData<false>(ctx.BufferDescriptorC());
out["buffer_descriptor_x"] = GetHLEBufferDescriptorData<true>(ctx.BufferDescriptorX());
- return std::move(out);
+ return out;
}
} // Anonymous namespace
namespace Core {
-Reporter::Reporter(Core::System& system) : system(system) {}
+Reporter::Reporter(System& system) : system(system) {}
Reporter::~Reporter() = default;
@@ -187,8 +196,9 @@ void Reporter::SaveCrashReport(u64 title_id, ResultCode result, u64 set_flags, u
const std::array<u64, 31>& registers,
const std::array<u64, 32>& backtrace, u32 backtrace_size,
const std::string& arch, u32 unk10) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
json out;
@@ -212,8 +222,9 @@ void Reporter::SaveCrashReport(u64 title_id, ResultCode result, u64 set_flags, u
void Reporter::SaveSvcBreakReport(u32 type, bool signal_debugger, u64 info1, u64 info2,
std::optional<std::vector<u8>> resolved_buffer) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
const auto title_id = system.CurrentProcess()->GetTitleID();
@@ -238,8 +249,9 @@ void Reporter::SaveSvcBreakReport(u32 type, bool signal_debugger, u64 info1, u64
void Reporter::SaveUnimplementedFunctionReport(Kernel::HLERequestContext& ctx, u32 command_id,
const std::string& name,
const std::string& service_name) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
const auto title_id = system.CurrentProcess()->GetTitleID();
@@ -259,8 +271,9 @@ void Reporter::SaveUnimplementedAppletReport(
u32 applet_id, u32 common_args_version, u32 library_version, u32 theme_color,
bool startup_sound, u64 system_tick, std::vector<std::vector<u8>> normal_channel,
std::vector<std::vector<u8>> interactive_channel) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
const auto title_id = system.CurrentProcess()->GetTitleID();
@@ -293,8 +306,9 @@ void Reporter::SaveUnimplementedAppletReport(
void Reporter::SavePlayReport(u64 title_id, u64 process_id, std::vector<std::vector<u8>> data,
std::optional<u128> user_id) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
json out;
@@ -316,8 +330,9 @@ void Reporter::SavePlayReport(u64 title_id, u64 process_id, std::vector<std::vec
void Reporter::SaveErrorReport(u64 title_id, ResultCode result,
std::optional<std::string> custom_text_main,
std::optional<std::string> custom_text_detail) const {
- if (!IsReportingEnabled())
+ if (!IsReportingEnabled()) {
return;
+ }
const auto timestamp = GetTimestamp();
json out;
@@ -335,12 +350,31 @@ void Reporter::SaveErrorReport(u64 title_id, ResultCode result,
SaveToFile(std::move(out), GetPath("error_report", title_id, timestamp));
}
-void Reporter::SaveUserReport() const {
+void Reporter::SaveFilesystemAccessReport(Service::FileSystem::LogMode log_mode,
+ std::string log_message) const {
if (!IsReportingEnabled())
return;
const auto timestamp = GetTimestamp();
const auto title_id = system.CurrentProcess()->GetTitleID();
+ json out;
+
+ out["yuzu_version"] = GetYuzuVersionData();
+ out["report_common"] = GetReportCommonData(title_id, RESULT_SUCCESS, timestamp);
+
+ out["log_mode"] = fmt::format("{:08X}", static_cast<u32>(log_mode));
+ out["log_message"] = std::move(log_message);
+
+ SaveToFile(std::move(out), GetPath("filesystem_access_report", title_id, timestamp));
+}
+
+void Reporter::SaveUserReport() const {
+ if (!IsReportingEnabled()) {
+ return;
+ }
+
+ const auto timestamp = GetTimestamp();
+ const auto title_id = system.CurrentProcess()->GetTitleID();
SaveToFile(GetFullDataAuto(timestamp, title_id, system),
GetPath("user_report", title_id, timestamp));
diff --git a/src/core/reporter.h b/src/core/reporter.h
index 3de19c0f7..44256de50 100644
--- a/src/core/reporter.h
+++ b/src/core/reporter.h
@@ -4,7 +4,9 @@
#pragma once
+#include <array>
#include <optional>
+#include <string>
#include <vector>
#include "common/common_types.h"
@@ -14,11 +16,17 @@ namespace Kernel {
class HLERequestContext;
} // namespace Kernel
+namespace Service::FileSystem {
+enum class LogMode : u32;
+}
+
namespace Core {
+class System;
+
class Reporter {
public:
- explicit Reporter(Core::System& system);
+ explicit Reporter(System& system);
~Reporter();
void SaveCrashReport(u64 title_id, ResultCode result, u64 set_flags, u64 entry_point, u64 sp,
@@ -45,12 +53,15 @@ public:
std::optional<std::string> custom_text_main = {},
std::optional<std::string> custom_text_detail = {}) const;
+ void SaveFilesystemAccessReport(Service::FileSystem::LogMode log_mode,
+ std::string log_message) const;
+
void SaveUserReport() const;
private:
bool IsReportingEnabled() const;
- Core::System& system;
+ System& system;
};
} // namespace Core
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 6d32ebea3..63aa59690 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -85,7 +85,7 @@ void LogSettings() {
LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0));
LogSetting("System_CurrentUser", Settings::values.current_user);
LogSetting("System_LanguageIndex", Settings::values.language_index);
- LogSetting("Core_UseCpuJit", Settings::values.use_cpu_jit);
+ LogSetting("Core_CpuJitEnabled", Settings::values.cpu_jit_enabled);
LogSetting("Core_UseMultiCore", Settings::values.use_multi_core);
LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor);
LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit);
diff --git a/src/core/settings.h b/src/core/settings.h
index e2ffcaaf7..acf18d653 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -378,7 +378,7 @@ struct Values {
std::atomic_bool is_device_reload_pending{true};
// Core
- bool use_cpu_jit;
+ bool cpu_jit_enabled;
bool use_multi_core;
// Data Storage
@@ -416,6 +416,7 @@ struct Values {
bool dump_exefs;
bool dump_nso;
bool reporting_services;
+ bool quest_flag;
// WebService
bool enable_telemetry;
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp
index 90d06830f..98f49042a 100644
--- a/src/core/telemetry_session.cpp
+++ b/src/core/telemetry_session.cpp
@@ -168,7 +168,7 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) {
AddField(Telemetry::FieldType::UserConfig, "Audio_SinkId", Settings::values.sink_id);
AddField(Telemetry::FieldType::UserConfig, "Audio_EnableAudioStretching",
Settings::values.enable_audio_stretching);
- AddField(Telemetry::FieldType::UserConfig, "Core_UseCpuJit", Settings::values.use_cpu_jit);
+ AddField(Telemetry::FieldType::UserConfig, "Core_UseCpuJit", Settings::values.cpu_jit_enabled);
AddField(Telemetry::FieldType::UserConfig, "Core_UseMultiCore",
Settings::values.use_multi_core);
AddField(Telemetry::FieldType::UserConfig, "Renderer_ResolutionFactor",
diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp
new file mode 100644
index 000000000..17f050068
--- /dev/null
+++ b/src/core/tools/freezer.cpp
@@ -0,0 +1,188 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
+#include "core/memory.h"
+#include "core/tools/freezer.h"
+
+namespace Tools {
+
+namespace {
+
+constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Timing::BASE_CLOCK_RATE / 60);
+
+u64 MemoryReadWidth(u32 width, VAddr addr) {
+ switch (width) {
+ case 1:
+ return Memory::Read8(addr);
+ case 2:
+ return Memory::Read16(addr);
+ case 4:
+ return Memory::Read32(addr);
+ case 8:
+ return Memory::Read64(addr);
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+void MemoryWriteWidth(u32 width, VAddr addr, u64 value) {
+ switch (width) {
+ case 1:
+ Memory::Write8(addr, static_cast<u8>(value));
+ break;
+ case 2:
+ Memory::Write16(addr, static_cast<u16>(value));
+ break;
+ case 4:
+ Memory::Write32(addr, static_cast<u32>(value));
+ break;
+ case 8:
+ Memory::Write64(addr, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // Anonymous namespace
+
+Freezer::Freezer(Core::Timing::CoreTiming& core_timing) : core_timing(core_timing) {
+ event = core_timing.RegisterEvent(
+ "MemoryFreezer::FrameCallback",
+ [this](u64 userdata, s64 cycles_late) { FrameCallback(userdata, cycles_late); });
+ core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+}
+
+Freezer::~Freezer() {
+ core_timing.UnscheduleEvent(event, 0);
+}
+
+void Freezer::SetActive(bool active) {
+ if (!this->active.exchange(active)) {
+ FillEntryReads();
+ core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+ LOG_DEBUG(Common_Memory, "Memory freezer activated!");
+ } else {
+ LOG_DEBUG(Common_Memory, "Memory freezer deactivated!");
+ }
+}
+
+bool Freezer::IsActive() const {
+ return active.load(std::memory_order_relaxed);
+}
+
+void Freezer::Clear() {
+ std::lock_guard lock{entries_mutex};
+
+ LOG_DEBUG(Common_Memory, "Clearing all frozen memory values.");
+
+ entries.clear();
+}
+
+u64 Freezer::Freeze(VAddr address, u32 width) {
+ std::lock_guard lock{entries_mutex};
+
+ const auto current_value = MemoryReadWidth(width, address);
+ entries.push_back({address, width, current_value});
+
+ LOG_DEBUG(Common_Memory,
+ "Freezing memory for address={:016X}, width={:02X}, current_value={:016X}", address,
+ width, current_value);
+
+ return current_value;
+}
+
+void Freezer::Unfreeze(VAddr address) {
+ std::lock_guard lock{entries_mutex};
+
+ LOG_DEBUG(Common_Memory, "Unfreezing memory for address={:016X}", address);
+
+ entries.erase(
+ std::remove_if(entries.begin(), entries.end(),
+ [&address](const Entry& entry) { return entry.address == address; }),
+ entries.end());
+}
+
+bool Freezer::IsFrozen(VAddr address) const {
+ std::lock_guard lock{entries_mutex};
+
+ return std::find_if(entries.begin(), entries.end(), [&address](const Entry& entry) {
+ return entry.address == address;
+ }) != entries.end();
+}
+
+void Freezer::SetFrozenValue(VAddr address, u64 value) {
+ std::lock_guard lock{entries_mutex};
+
+ const auto iter = std::find_if(entries.begin(), entries.end(), [&address](const Entry& entry) {
+ return entry.address == address;
+ });
+
+ if (iter == entries.end()) {
+ LOG_ERROR(Common_Memory,
+ "Tried to set freeze value for address={:016X} that is not frozen!", address);
+ return;
+ }
+
+ LOG_DEBUG(Common_Memory,
+ "Manually overridden freeze value for address={:016X}, width={:02X} to value={:016X}",
+ iter->address, iter->width, value);
+ iter->value = value;
+}
+
+std::optional<Freezer::Entry> Freezer::GetEntry(VAddr address) const {
+ std::lock_guard lock{entries_mutex};
+
+ const auto iter = std::find_if(entries.begin(), entries.end(), [&address](const Entry& entry) {
+ return entry.address == address;
+ });
+
+ if (iter == entries.end()) {
+ return std::nullopt;
+ }
+
+ return *iter;
+}
+
+std::vector<Freezer::Entry> Freezer::GetEntries() const {
+ std::lock_guard lock{entries_mutex};
+
+ return entries;
+}
+
+void Freezer::FrameCallback(u64 userdata, s64 cycles_late) {
+ if (!IsActive()) {
+ LOG_DEBUG(Common_Memory, "Memory freezer has been deactivated, ending callback events.");
+ return;
+ }
+
+ std::lock_guard lock{entries_mutex};
+
+ for (const auto& entry : entries) {
+ LOG_DEBUG(Common_Memory,
+ "Enforcing memory freeze at address={:016X}, value={:016X}, width={:02X}",
+ entry.address, entry.value, entry.width);
+ MemoryWriteWidth(entry.width, entry.address, entry.value);
+ }
+
+ core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - cycles_late, event);
+}
+
+void Freezer::FillEntryReads() {
+ std::lock_guard lock{entries_mutex};
+
+ LOG_DEBUG(Common_Memory, "Updating memory freeze entries to current values.");
+
+ for (auto& entry : entries) {
+ entry.value = MemoryReadWidth(entry.width, entry.address);
+ }
+}
+
+} // namespace Tools
diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h
new file mode 100644
index 000000000..b58de5472
--- /dev/null
+++ b/src/core/tools/freezer.h
@@ -0,0 +1,82 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <optional>
+#include <vector>
+#include "common/common_types.h"
+
+namespace Core::Timing {
+class CoreTiming;
+struct EventType;
+} // namespace Core::Timing
+
+namespace Tools {
+
+/**
+ * This class allows the user to prevent an application from writing new values to certain memory
+ * locations. This has a variety of uses when attempting to reverse engineer a game.
+ *
+ * One example could be a cheat to prevent Mario from taking damage in SMO. One could freeze the
+ * memory address that the game uses to store Mario's health so when he takes damage (and the game
+ * tries to write the new health value to memory), the value won't change.
+ */
+class Freezer {
+public:
+ struct Entry {
+ VAddr address;
+ u32 width;
+ u64 value;
+ };
+
+ explicit Freezer(Core::Timing::CoreTiming& core_timing);
+ ~Freezer();
+
+ // Enables or disables the entire memory freezer.
+ void SetActive(bool active);
+
+ // Returns whether or not the freezer is active.
+ bool IsActive() const;
+
+ // Removes all entries from the freezer.
+ void Clear();
+
+ // Freezes the value at the given memory address. The memory will be held at the value that
+ // is read during this call. Width can be 1, 2, 4, or 8 (in bytes).
+ u64 Freeze(VAddr address, u32 width);
+
+ // Unfreezes the memory value at address. If the address isn't frozen, this is a no-op.
+ void Unfreeze(VAddr address);
+
+ // Returns whether or not the address is frozen.
+ bool IsFrozen(VAddr address) const;
+
+ // Sets the value that the address should be frozen to. This doesn't change the width set by
+ // Freeze(). If the address isn't frozen, this will not freeze it and is thus a no-op.
+ void SetFrozenValue(VAddr address, u64 value);
+
+ // Returns the entry corresponding to the address if the address is frozen, otherwise
+ // std::nullopt.
+ std::optional<Entry> GetEntry(VAddr address) const;
+
+ // Returns all the entries in the freezer; an empty vector means nothing is frozen.
+ std::vector<Entry> GetEntries() const;
+
+private:
+ void FrameCallback(u64 userdata, s64 cycles_late);
+ void FillEntryReads();
+
+ std::atomic_bool active{false};
+
+ mutable std::mutex entries_mutex;
+ std::vector<Entry> entries;
+
+ Core::Timing::EventType* event;
+ Core::Timing::CoreTiming& core_timing;
+};
+
+} // namespace Tools
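The freezer is driven entirely from the outside (for example a cheat or debugging UI). A short usage sketch, assuming a Core::System& named system is in scope; the address and value below are made up for illustration:

// Pin a 4-byte value so FrameCallback rewrites it roughly once per frame (~1/60 s).
Tools::Freezer freezer{system.CoreTiming()};
freezer.SetActive(true);

const VAddr health_addr = 0x0000'1234'5678'0000;      // hypothetical guest address
const u64 captured = freezer.Freeze(health_addr, 4);  // holds the value read right now
freezer.SetFrozenValue(health_addr, 100);             // or override it explicitly

// Stop enforcing a single address, or drop every entry at once.
freezer.Unfreeze(health_addr);
freezer.Clear();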
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 340d6a272..f8be8fd19 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -99,24 +99,24 @@ TEST_CASE("CoreTiming[Threadsave]", "[core]") {
core_timing.Advance();
// D -> B -> C -> A -> E
- core_timing.ScheduleEventThreadsafe(1000, cb_a, CB_IDS[0]);
- // Manually force since ScheduleEventThreadsafe doesn't call it
+ core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]);
+ // Manually force since ScheduleEvent doesn't call it
core_timing.ForceExceptionCheck(1000);
REQUIRE(1000 == core_timing.GetDowncount());
- core_timing.ScheduleEventThreadsafe(500, cb_b, CB_IDS[1]);
- // Manually force since ScheduleEventThreadsafe doesn't call it
+ core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]);
+ // Manually force since ScheduleEvent doesn't call it
core_timing.ForceExceptionCheck(500);
REQUIRE(500 == core_timing.GetDowncount());
- core_timing.ScheduleEventThreadsafe(800, cb_c, CB_IDS[2]);
- // Manually force since ScheduleEventThreadsafe doesn't call it
+ core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]);
+ // Manually force since ScheduleEvent doesn't call it
core_timing.ForceExceptionCheck(800);
REQUIRE(500 == core_timing.GetDowncount());
- core_timing.ScheduleEventThreadsafe(100, cb_d, CB_IDS[3]);
- // Manually force since ScheduleEventThreadsafe doesn't call it
+ core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]);
+ // Manually force since ScheduleEvent doesn't call it
core_timing.ForceExceptionCheck(100);
REQUIRE(100 == core_timing.GetDowncount());
- core_timing.ScheduleEventThreadsafe(1200, cb_e, CB_IDS[4]);
- // Manually force since ScheduleEventThreadsafe doesn't call it
+ core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]);
+ // Manually force since ScheduleEvent doesn't call it
core_timing.ForceExceptionCheck(1200);
REQUIRE(100 == core_timing.GetDowncount());
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index f8b67cbe1..6839abe71 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -41,12 +41,12 @@ add_library(video_core STATIC
renderer_opengl/gl_buffer_cache.h
renderer_opengl/gl_device.cpp
renderer_opengl/gl_device.h
+ renderer_opengl/gl_framebuffer_cache.cpp
+ renderer_opengl/gl_framebuffer_cache.h
renderer_opengl/gl_global_cache.cpp
renderer_opengl/gl_global_cache.h
renderer_opengl/gl_rasterizer.cpp
renderer_opengl/gl_rasterizer.h
- renderer_opengl/gl_rasterizer_cache.cpp
- renderer_opengl/gl_rasterizer_cache.h
renderer_opengl/gl_resource_manager.cpp
renderer_opengl/gl_resource_manager.h
renderer_opengl/gl_sampler_cache.cpp
@@ -67,6 +67,8 @@ add_library(video_core STATIC
renderer_opengl/gl_state.h
renderer_opengl/gl_stream_buffer.cpp
renderer_opengl/gl_stream_buffer.h
+ renderer_opengl/gl_texture_cache.cpp
+ renderer_opengl/gl_texture_cache.h
renderer_opengl/maxwell_to_gl.h
renderer_opengl/renderer_opengl.cpp
renderer_opengl/renderer_opengl.h
@@ -88,6 +90,7 @@ add_library(video_core STATIC
shader/decode/conversion.cpp
shader/decode/memory.cpp
shader/decode/texture.cpp
+ shader/decode/image.cpp
shader/decode/float_set_predicate.cpp
shader/decode/integer_set_predicate.cpp
shader/decode/half_set_predicate.cpp
@@ -109,6 +112,13 @@ add_library(video_core STATIC
shader/track.cpp
surface.cpp
surface.h
+ texture_cache/surface_base.cpp
+ texture_cache/surface_base.h
+ texture_cache/surface_params.cpp
+ texture_cache/surface_params.h
+ texture_cache/surface_view.cpp
+ texture_cache/surface_view.h
+ texture_cache/texture_cache.h
textures/astc.cpp
textures/astc.h
textures/convert.cpp
@@ -116,8 +126,6 @@ add_library(video_core STATIC
textures/decoders.cpp
textures/decoders.h
textures/texture.h
- texture_cache.cpp
- texture_cache.h
video_core.cpp
video_core.h
)
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 082a40cd9..d44ad0cd8 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -36,10 +36,10 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
} else {
UNIMPLEMENTED_IF(regs.dest.z != 0);
UNIMPLEMENTED_IF(regs.dest.depth != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 1);
- UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 1);
+ UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
+ UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
const std::size_t dst_size = Tegra::Texture::CalculateSize(
- true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 1);
+ true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
tmp_buffer.resize(dst_size);
memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index ef4f5839a..462da419e 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -39,15 +39,15 @@ struct Registers {
}
u32 BlockWidth() const {
- return 1U << block_width.Value();
+ return block_width.Value();
}
u32 BlockHeight() const {
- return 1U << block_height.Value();
+ return block_height.Value();
}
u32 BlockDepth() const {
- return 1U << block_depth.Value();
+ return block_depth.Value();
}
} dest;
};
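Note that after this change the BlockWidth/BlockHeight/BlockDepth accessors return the raw log2-encoded exponents straight from the registers, which is why the caller above now compares against 0 and passes the value through to Tegra::Texture::CalculateSize unexpanded. A rough sketch of the implied conversion, assuming the size helper expands it internally:

    // Illustrative only: expanding the log2-encoded block height.
    const u32 block_height_log2 = regs.dest.BlockHeight(); // e.g. 4
    const u32 block_height_gobs = 1U << block_height_log2; // 16 GOBs tall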
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 55966eef1..0ee228e28 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -4,7 +4,6 @@
#include "common/assert.h"
#include "common/logging/log.h"
-#include "common/math_util.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
@@ -35,21 +34,31 @@ void Fermi2D::HandleSurfaceCopy() {
static_cast<u32>(regs.operation));
// TODO(Subv): Only raw copies are implemented.
- ASSERT(regs.operation == Regs::Operation::SrcCopy);
+ ASSERT(regs.operation == Operation::SrcCopy);
const u32 src_blit_x1{static_cast<u32>(regs.blit_src_x >> 32)};
const u32 src_blit_y1{static_cast<u32>(regs.blit_src_y >> 32)};
- const u32 src_blit_x2{
- static_cast<u32>((regs.blit_src_x + (regs.blit_dst_width * regs.blit_du_dx)) >> 32)};
- const u32 src_blit_y2{
- static_cast<u32>((regs.blit_src_y + (regs.blit_dst_height * regs.blit_dv_dy)) >> 32)};
-
+ u32 src_blit_x2, src_blit_y2;
+ if (regs.blit_control.origin == Origin::Corner) {
+ src_blit_x2 =
+ static_cast<u32>((regs.blit_src_x + (regs.blit_du_dx * regs.blit_dst_width)) >> 32);
+ src_blit_y2 =
+ static_cast<u32>((regs.blit_src_y + (regs.blit_dv_dy * regs.blit_dst_height)) >> 32);
+ } else {
+ src_blit_x2 = static_cast<u32>((regs.blit_src_x >> 32) + regs.blit_dst_width);
+ src_blit_y2 = static_cast<u32>((regs.blit_src_y >> 32) + regs.blit_dst_height);
+ }
const Common::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2};
const Common::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y,
regs.blit_dst_x + regs.blit_dst_width,
regs.blit_dst_y + regs.blit_dst_height};
+ Config copy_config;
+ copy_config.operation = regs.operation;
+ copy_config.filter = regs.blit_control.filter;
+ copy_config.src_rect = src_rect;
+ copy_config.dst_rect = dst_rect;
- if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, src_rect, dst_rect)) {
+ if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, copy_config)) {
UNIMPLEMENTED();
}
}
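The blit source registers are 32.32 fixed-point values, so shifting right by 32 extracts the integer texel coordinate. A worked example of the corner-origin path with made-up numbers:

    // blit_src_x     = 10 << 32   (source starts at texel x = 10)
    // blit_du_dx     = 1 << 31    (0.5 source texels per destination pixel)
    // blit_dst_width = 64
    // src_blit_x2 = (10<<32 + (1<<31)*64) >> 32
    //             = (10<<32 + 32<<32) >> 32 = 42
    // i.e. 32 source texels are stretched across 64 destination pixels.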
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index 45f59a4d9..05421d185 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -9,6 +9,7 @@
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
+#include "common/math_util.h"
#include "video_core/gpu.h"
namespace Tegra {
@@ -38,6 +39,26 @@ public:
/// Write the value to the register identified by method.
void CallMethod(const GPU::MethodCall& method_call);
+ enum class Origin : u32 {
+ Center = 0,
+ Corner = 1,
+ };
+
+ enum class Filter : u32 {
+ PointSample = 0, // Nearest
+ Linear = 1,
+ };
+
+ enum class Operation : u32 {
+ SrcCopyAnd = 0,
+ ROPAnd = 1,
+ Blend = 2,
+ SrcCopy = 3,
+ ROP = 4,
+ SrcCopyPremult = 5,
+ BlendPremult = 6,
+ };
+
struct Regs {
static constexpr std::size_t NUM_REGS = 0x258;
@@ -63,32 +84,19 @@ public:
}
u32 BlockWidth() const {
- // The block width is stored in log2 format.
- return 1 << block_width;
+ return block_width.Value();
}
u32 BlockHeight() const {
- // The block height is stored in log2 format.
- return 1 << block_height;
+ return block_height.Value();
}
u32 BlockDepth() const {
- // The block depth is stored in log2 format.
- return 1 << block_depth;
+ return block_depth.Value();
}
};
static_assert(sizeof(Surface) == 0x28, "Surface has incorrect size");
- enum class Operation : u32 {
- SrcCopyAnd = 0,
- ROPAnd = 1,
- Blend = 2,
- SrcCopy = 3,
- ROP = 4,
- SrcCopyPremult = 5,
- BlendPremult = 6,
- };
-
union {
struct {
INSERT_PADDING_WORDS(0x80);
@@ -105,7 +113,11 @@ public:
INSERT_PADDING_WORDS(0x177);
- u32 blit_control;
+ union {
+ u32 raw;
+ BitField<0, 1, Origin> origin;
+ BitField<4, 1, Filter> filter;
+ } blit_control;
INSERT_PADDING_WORDS(0x8);
@@ -124,6 +136,13 @@ public:
};
} regs{};
+ struct Config {
+ Operation operation;
+ Filter filter;
+ Common::Rectangle<u32> src_rect;
+ Common::Rectangle<u32> dst_rect;
+ };
+
private:
VideoCore::RasterizerInterface& rasterizer;
MemoryManager& memory_manager;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 08d553696..8755b8af4 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -430,14 +430,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
- ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
- tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
- "TIC versions other than BlockLinear or Pitch are unimplemented");
-
- const auto r_type = tic_entry.r_type.Value();
- const auto g_type = tic_entry.g_type.Value();
- const auto b_type = tic_entry.b_type.Value();
- const auto a_type = tic_entry.a_type.Value();
+ const auto r_type{tic_entry.r_type.Value()};
+ const auto g_type{tic_entry.g_type.Value()};
+ const auto b_type{tic_entry.b_type.Value()};
+ const auto a_type{tic_entry.a_type.Value()};
// TODO(Subv): Different data types for separate components are not supported
DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3a5dfef0c..afb9578d0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -111,7 +111,7 @@ void MaxwellDMA::HandleCopy() {
memory_manager.WriteBlock(dest, write_buffer.data(), dst_size);
} else {
- ASSERT(regs.dst_params.BlockDepth() == 1);
+ ASSERT(regs.dst_params.BlockDepth() == 0);
const u32 src_bytes_per_pixel = regs.src_pitch / regs.x_count;
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index e5942f671..17b015ca7 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -59,11 +59,11 @@ public:
};
u32 BlockHeight() const {
- return 1 << block_height;
+ return block_height.Value();
}
u32 BlockDepth() const {
- return 1 << block_depth;
+ return block_depth.Value();
}
};
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index ffb3ec3e0..404d4f5aa 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -4,6 +4,7 @@
#pragma once
+#include <array>
#include <bitset>
#include <optional>
#include <tuple>
@@ -126,6 +127,15 @@ union Sampler {
u64 value{};
};
+union Image {
+ Image() = default;
+
+ constexpr explicit Image(u64 value) : value{value} {}
+
+ BitField<36, 13, u64> index;
+ u64 value;
+};
+
} // namespace Tegra::Shader
namespace std {
@@ -344,6 +354,26 @@ enum class TextureMiscMode : u64 {
PTP,
};
+enum class SurfaceDataMode : u64 {
+ P = 0,
+ D_BA = 1,
+};
+
+enum class OutOfBoundsStore : u64 {
+ Ignore = 0,
+ Clamp = 1,
+ Trap = 2,
+};
+
+enum class ImageType : u64 {
+ Texture1D = 0,
+ TextureBuffer = 1,
+ Texture1DArray = 2,
+ Texture2D = 3,
+ Texture2DArray = 4,
+ Texture3D = 5,
+};
+
enum class IsberdMode : u64 {
None = 0,
Patch = 1,
@@ -398,7 +428,7 @@ enum class LmemLoadCacheManagement : u64 {
CV = 3,
};
-enum class LmemStoreCacheManagement : u64 {
+enum class StoreCacheManagement : u64 {
Default = 0,
CG = 1,
CS = 2,
@@ -811,7 +841,7 @@ union Instruction {
} ld_l;
union {
- BitField<44, 2, LmemStoreCacheManagement> cache_management;
+ BitField<44, 2, StoreCacheManagement> cache_management;
} st_l;
union {
@@ -1232,6 +1262,20 @@ union Instruction {
} texs;
union {
+ BitField<28, 1, u64> is_array;
+ BitField<29, 2, TextureType> texture_type;
+ BitField<35, 1, u64> aoffi;
+ BitField<49, 1, u64> nodep_flag;
+ BitField<50, 1, u64> ms; // Multisample?
+ BitField<54, 1, u64> cl;
+ BitField<55, 1, u64> process_mode;
+
+ TextureProcessMode GetTextureProcessMode() const {
+ return process_mode == 0 ? TextureProcessMode::LZ : TextureProcessMode::LL;
+ }
+ } tld;
+
+ union {
BitField<49, 1, u64> nodep_flag;
BitField<53, 4, u64> texture_info;
@@ -1281,6 +1325,35 @@ union Instruction {
} tlds;
union {
+ BitField<24, 2, StoreCacheManagement> cache_management;
+ BitField<33, 3, ImageType> image_type;
+ BitField<49, 2, OutOfBoundsStore> out_of_bounds_store;
+ BitField<51, 1, u64> is_immediate;
+ BitField<52, 1, SurfaceDataMode> mode;
+
+ BitField<20, 3, StoreType> store_data_layout;
+ BitField<20, 4, u64> component_mask_selector;
+
+ bool IsComponentEnabled(std::size_t component) const {
+ ASSERT(mode == SurfaceDataMode::P);
+ constexpr u8 R = 0b0001;
+ constexpr u8 G = 0b0010;
+ constexpr u8 B = 0b0100;
+ constexpr u8 A = 0b1000;
+ constexpr std::array<u8, 16> mask = {
+ 0, (R), (G), (R | G), (B), (R | B),
+ (G | B), (R | G | B), (A), (R | A), (G | A), (R | G | A),
+ (B | A), (R | B | A), (G | B | A), (R | G | B | A)};
+ return std::bitset<4>{mask.at(component_mask_selector)}.test(component);
+ }
+
+ StoreType GetStoreDataLayout() const {
+ ASSERT(mode == SurfaceDataMode::D_BA);
+ return store_data_layout;
+ }
+ } sust;
+
+ union {
BitField<20, 24, u64> target;
BitField<5, 1, u64> constant_buffer;
@@ -1371,6 +1444,7 @@ union Instruction {
Attribute attribute;
Sampler sampler;
+ Image image;
u64 value;
};
@@ -1408,11 +1482,13 @@ public:
TXQ, // Texture Query
TXQ_B, // Texture Query Bindless
TEXS, // Texture Fetch with scalar/non-vec4 source/destinations
+ TLD, // Texture Load
TLDS, // Texture Load with scalar/non-vec4 source/destinations
TLD4, // Texture Load 4
TLD4S, // Texture Load 4 with scalar / non - vec4 source / destinations
TMML_B, // Texture Mip Map Level
TMML, // Texture Mip Map Level
+ SUST, // Surface Store
EXIT,
IPA,
OUT_R, // Emit vertex/primitive
@@ -1543,6 +1619,7 @@ public:
Synch,
Memory,
Texture,
+ Image,
FloatSet,
FloatSetPredicate,
IntegerSet,
@@ -1682,11 +1759,13 @@ private:
INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"),
INST("1101111101010---", Id::TXQ_B, Type::Texture, "TXQ_B"),
INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"),
+ INST("11011100--11----", Id::TLD, Type::Texture, "TLD"),
INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"),
INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"),
INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"),
INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
+ INST("11101011001-----", Id::SUST, Type::Image, "SUST"),
INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 5d8d126c1..322453116 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -202,11 +202,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
}
bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t size) const {
- const GPUVAddr end = start + size;
+ const std::size_t inner_size = size - 1;
+ const GPUVAddr end = start + inner_size;
const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
const auto range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
- return range == size;
+ return range == inner_size;
}
void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const {
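The IsBlockContinuous change above compares the host distance between the first and the last byte of the range rather than one past the end; probing GetPointer(start + size) could presumably land on the next, possibly unmapped, page and make a perfectly continuous block look discontinuous. Sketch of the intended invariant:

    // For a continuous mapping of `size` bytes starting at `start`:
    //   host(start + size - 1) - host(start) == size - 1
    // while host(start + size) may already belong to an unmapped page.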
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index 0c4ea1494..6de1597a2 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -169,6 +169,8 @@ protected:
object->MarkAsModified(false, *this);
}
+ std::recursive_mutex mutex;
+
private:
/// Returns a list of cached objects from the specified memory region, ordered by access time
std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) {
@@ -208,5 +210,4 @@ private:
IntervalCache interval_cache; ///< Cache of objects
u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing
VideoCore::RasterizerInterface& rasterizer;
- std::recursive_mutex mutex;
};
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index d7b86df38..5ee4f8e8e 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -10,6 +10,10 @@
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
+namespace Tegra {
+class MemoryManager;
+}
+
namespace VideoCore {
enum class LoadCallbackStage {
@@ -46,8 +50,7 @@ public:
/// Attempt to use a faster method to perform a surface copy
virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) {
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
return false;
}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 48b86f3bd..2b9bd142e 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -23,6 +23,7 @@ OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
GLintptr OGLBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment,
bool cache) {
+ std::lock_guard lock{mutex};
auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
// Cache management is a big overhead, so only cache entries with a given size.
@@ -62,6 +63,7 @@ GLintptr OGLBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::
GLintptr OGLBufferCache::UploadHostMemory(const void* raw_pointer, std::size_t size,
std::size_t alignment) {
+ std::lock_guard lock{mutex};
AlignBuffer(alignment);
std::memcpy(buffer_ptr, raw_pointer, size);
const GLintptr uploaded_offset = buffer_offset;
diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp
new file mode 100644
index 000000000..7c926bd48
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.cpp
@@ -0,0 +1,75 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include "common/cityhash.h"
+#include "common/scope_exit.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
+#include "video_core/renderer_opengl/gl_state.h"
+
+namespace OpenGL {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+FramebufferCacheOpenGL::FramebufferCacheOpenGL() = default;
+
+FramebufferCacheOpenGL::~FramebufferCacheOpenGL() = default;
+
+GLuint FramebufferCacheOpenGL::GetFramebuffer(const FramebufferCacheKey& key) {
+ const auto [entry, is_cache_miss] = cache.try_emplace(key);
+ auto& framebuffer{entry->second};
+ if (is_cache_miss) {
+ framebuffer = CreateFramebuffer(key);
+ }
+ return framebuffer.handle;
+}
+
+OGLFramebuffer FramebufferCacheOpenGL::CreateFramebuffer(const FramebufferCacheKey& key) {
+ OGLFramebuffer framebuffer;
+ framebuffer.Create();
+
+ // TODO(Rodrigo): Use DSA here after Nvidia fixes their framebuffer DSA bugs.
+ local_state.draw.draw_framebuffer = framebuffer.handle;
+ local_state.ApplyFramebufferState();
+
+ if (key.is_single_buffer) {
+ if (key.color_attachments[0] != GL_NONE && key.colors[0]) {
+ key.colors[0]->Attach(key.color_attachments[0], GL_DRAW_FRAMEBUFFER);
+ glDrawBuffer(key.color_attachments[0]);
+ } else {
+ glDrawBuffer(GL_NONE);
+ }
+ } else {
+ for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+ if (key.colors[index]) {
+ key.colors[index]->Attach(GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index),
+ GL_DRAW_FRAMEBUFFER);
+ }
+ }
+ glDrawBuffers(key.colors_count, key.color_attachments.data());
+ }
+
+ if (key.zeta) {
+ key.zeta->Attach(key.stencil_enable ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT,
+ GL_DRAW_FRAMEBUFFER);
+ }
+
+ return framebuffer;
+}
+
+std::size_t FramebufferCacheKey::Hash() const {
+ static_assert(sizeof(*this) % sizeof(u64) == 0, "Unaligned struct");
+ return static_cast<std::size_t>(
+ Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
+}
+
+bool FramebufferCacheKey::operator==(const FramebufferCacheKey& rhs) const {
+ return std::tie(is_single_buffer, stencil_enable, colors_count, color_attachments, colors,
+ zeta) == std::tie(rhs.is_single_buffer, rhs.stencil_enable, rhs.colors_count,
+ rhs.color_attachments, rhs.colors, rhs.zeta);
+}
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_framebuffer_cache.h b/src/video_core/renderer_opengl/gl_framebuffer_cache.h
new file mode 100644
index 000000000..a3a996353
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_framebuffer_cache.h
@@ -0,0 +1,68 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+#include <unordered_map>
+
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+
+namespace OpenGL {
+
+struct alignas(sizeof(u64)) FramebufferCacheKey {
+ bool is_single_buffer = false;
+ bool stencil_enable = false;
+ u16 colors_count = 0;
+
+ std::array<GLenum, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_attachments{};
+ std::array<View, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors;
+ View zeta;
+
+ std::size_t Hash() const;
+
+ bool operator==(const FramebufferCacheKey& rhs) const;
+
+ bool operator!=(const FramebufferCacheKey& rhs) const {
+ return !operator==(rhs);
+ }
+};
+
+} // namespace OpenGL
+
+namespace std {
+
+template <>
+struct hash<OpenGL::FramebufferCacheKey> {
+ std::size_t operator()(const OpenGL::FramebufferCacheKey& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
+
+namespace OpenGL {
+
+class FramebufferCacheOpenGL {
+public:
+ FramebufferCacheOpenGL();
+ ~FramebufferCacheOpenGL();
+
+ GLuint GetFramebuffer(const FramebufferCacheKey& key);
+
+private:
+ OGLFramebuffer CreateFramebuffer(const FramebufferCacheKey& key);
+
+ OpenGLState local_state;
+ std::unordered_map<FramebufferCacheKey, OGLFramebuffer> cache;
+};
+
+} // namespace OpenGL
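Hash() feeds the raw bytes of the key into CityHash64, hence the alignas(sizeof(u64)) and the static_assert in the .cpp that the struct size is a multiple of u64. A minimal lookup sketch, mirroring how the rasterizer uses it (color_view is a placeholder for a View obtained from the texture cache):

    FramebufferCacheKey key{};
    key.is_single_buffer = true;
    key.color_attachments[0] = GL_COLOR_ATTACHMENT0;
    key.colors[0] = color_view;
    const GLuint handle = framebuffer_cache.GetFramebuffer(key);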
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp
index ea4a593af..d5e385151 100644
--- a/src/video_core/renderer_opengl/gl_global_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_global_cache.cpp
@@ -76,6 +76,7 @@ GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer)
GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion(
const GLShader::GlobalMemoryEntry& global_region,
Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) {
+ std::lock_guard lock{mutex};
auto& gpu{Core::System::GetInstance().GPU()};
auto& memory_manager{gpu.MemoryManager()};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index d77426067..f45a3c5ef 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -29,8 +29,10 @@
namespace OpenGL {
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using PixelFormat = VideoCore::Surface::PixelFormat;
-using SurfaceType = VideoCore::Surface::SurfaceType;
+
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceType;
MICROPROFILE_DEFINE(OpenGL_VAO, "OpenGL", "Vertex Format Setup", MP_RGB(128, 128, 192));
MICROPROFILE_DEFINE(OpenGL_VB, "OpenGL", "Vertex Buffer Setup", MP_RGB(128, 128, 192));
@@ -78,29 +80,9 @@ struct DrawParameters {
}
};
-struct FramebufferCacheKey {
- bool is_single_buffer = false;
- bool stencil_enable = false;
-
- std::array<GLenum, Maxwell::NumRenderTargets> color_attachments{};
- std::array<GLuint, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> colors{};
- u32 colors_count = 0;
-
- GLuint zeta = 0;
-
- auto Tie() const {
- return std::tie(is_single_buffer, stencil_enable, color_attachments, colors, colors_count,
- zeta);
- }
-
- bool operator<(const FramebufferCacheKey& rhs) const {
- return Tie() < rhs.Tie();
- }
-};
-
RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window,
ScreenInfo& info)
- : res_cache{*this}, shader_cache{*this, system, emu_window, device},
+ : texture_cache{system, *this, device}, shader_cache{*this, system, emu_window, device},
global_cache{*this}, system{system}, screen_info{info},
buffer_cache(*this, STREAM_BUFFER_SIZE) {
OpenGLState::ApplyDefaultState();
@@ -121,11 +103,6 @@ void RasterizerOpenGL::CheckExtensions() {
Render_OpenGL,
"Anisotropic filter is not supported! This can cause graphical issues in some games.");
}
- if (!GLAD_GL_ARB_buffer_storage) {
- LOG_WARNING(
- Render_OpenGL,
- "Buffer storage control is not supported! This can cause performance degradation.");
- }
}
GLuint RasterizerOpenGL::SetupVertexFormat() {
@@ -302,8 +279,14 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
static_cast<GLsizeiptr>(sizeof(ubo)));
Shader shader{shader_cache.GetStageProgram(program)};
- const auto [program_handle, next_bindings] =
- shader->GetProgramHandle(primitive_mode, base_bindings);
+
+ const auto stage_enum{static_cast<Maxwell::ShaderStage>(stage)};
+ SetupDrawConstBuffers(stage_enum, shader);
+ SetupGlobalRegions(stage_enum, shader);
+ const auto texture_buffer_usage{SetupTextures(stage_enum, shader, base_bindings)};
+
+ const ProgramVariant variant{base_bindings, primitive_mode, texture_buffer_usage};
+ const auto [program_handle, next_bindings] = shader->GetProgramHandle(variant);
switch (program) {
case Maxwell::ShaderProgram::VertexA:
@@ -321,11 +304,6 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
shader_config.enable.Value(), shader_config.offset);
}
- const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage);
- SetupDrawConstBuffers(stage_enum, shader);
- SetupGlobalRegions(stage_enum, shader);
- SetupTextures(stage_enum, shader, base_bindings);
-
// Workaround for Intel drivers.
// When a clip distance is enabled but not set in the shader it crops parts of the screen
// (sometimes it's half the screen, sometimes three quarters). To avoid this, enable the
@@ -351,44 +329,6 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
gpu.dirty_flags.shaders = false;
}
-void RasterizerOpenGL::SetupCachedFramebuffer(const FramebufferCacheKey& fbkey,
- OpenGLState& current_state) {
- const auto [entry, is_cache_miss] = framebuffer_cache.try_emplace(fbkey);
- auto& framebuffer = entry->second;
-
- if (is_cache_miss)
- framebuffer.Create();
-
- current_state.draw.draw_framebuffer = framebuffer.handle;
- current_state.ApplyFramebufferState();
-
- if (!is_cache_miss)
- return;
-
- if (fbkey.is_single_buffer) {
- if (fbkey.color_attachments[0] != GL_NONE) {
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER, fbkey.color_attachments[0], fbkey.colors[0],
- 0);
- }
- glDrawBuffer(fbkey.color_attachments[0]);
- } else {
- for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
- if (fbkey.colors[index]) {
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER,
- GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(index),
- fbkey.colors[index], 0);
- }
- }
- glDrawBuffers(fbkey.colors_count, fbkey.color_attachments.data());
- }
-
- if (fbkey.zeta) {
- GLenum zeta_attachment =
- fbkey.stencil_enable ? GL_DEPTH_STENCIL_ATTACHMENT : GL_DEPTH_ATTACHMENT;
- glFramebufferTexture(GL_DRAW_FRAMEBUFFER, zeta_attachment, fbkey.zeta, 0);
- }
-}
-
std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
const auto& regs = system.GPU().Maxwell3D().regs;
@@ -478,9 +418,13 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
}
current_framebuffer_config_state = fb_config_state;
- Surface depth_surface;
+ texture_cache.GuardRenderTargets(true);
+
+ View depth_surface{};
if (using_depth_fb) {
- depth_surface = res_cache.GetDepthBufferSurface(preserve_contents);
+ depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
+ } else {
+ texture_cache.SetEmptyDepthBuffer();
}
UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -493,13 +437,13 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
if (using_color_fb) {
if (single_color_target) {
// Used when just a single color attachment is enabled, e.g. for clearing a color buffer
- Surface color_surface =
- res_cache.GetColorBufferSurface(*single_color_target, preserve_contents);
+ View color_surface{
+ texture_cache.GetColorBufferSurface(*single_color_target, preserve_contents)};
if (color_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even if
// the shader doesn't actually write to it.
- color_surface->MarkAsModified(true, res_cache);
+ texture_cache.MarkColorBufferInUse(*single_color_target);
// Workaround for an issue in nvidia drivers
// https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
state.framebuffer_srgb.enabled |= color_surface->GetSurfaceParams().srgb_conversion;
@@ -508,16 +452,21 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
fbkey.is_single_buffer = true;
fbkey.color_attachments[0] =
GL_COLOR_ATTACHMENT0 + static_cast<GLenum>(*single_color_target);
- fbkey.colors[0] = color_surface != nullptr ? color_surface->Texture().handle : 0;
+ fbkey.colors[0] = color_surface;
+ for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+ if (index != *single_color_target) {
+ texture_cache.SetEmptyColorBuffer(index);
+ }
+ }
} else {
// Multiple color attachments are enabled
for (std::size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
- Surface color_surface = res_cache.GetColorBufferSurface(index, preserve_contents);
+ View color_surface{texture_cache.GetColorBufferSurface(index, preserve_contents)};
if (color_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even
// if the shader doesn't actually write to it.
- color_surface->MarkAsModified(true, res_cache);
+ texture_cache.MarkColorBufferInUse(index);
// Enable sRGB only for supported formats
// Workaround for an issue in nvidia drivers
// https://devtalk.nvidia.com/default/topic/776591/opengl/gl_framebuffer_srgb-functions-incorrectly/
@@ -527,8 +476,7 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
fbkey.color_attachments[index] =
GL_COLOR_ATTACHMENT0 + regs.rt_control.GetMap(index);
- fbkey.colors[index] =
- color_surface != nullptr ? color_surface->Texture().handle : 0;
+ fbkey.colors[index] = color_surface;
}
fbkey.is_single_buffer = false;
fbkey.colors_count = regs.rt_control.count;
@@ -541,14 +489,16 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
if (depth_surface) {
// Assume that a surface will be written to if it is used as a framebuffer, even if
// the shader doesn't actually write to it.
- depth_surface->MarkAsModified(true, res_cache);
+ texture_cache.MarkDepthBufferInUse();
- fbkey.zeta = depth_surface->Texture().handle;
+ fbkey.zeta = depth_surface;
fbkey.stencil_enable = regs.stencil_enable &&
depth_surface->GetSurfaceParams().type == SurfaceType::DepthStencil;
}
- SetupCachedFramebuffer(fbkey, current_state);
+ texture_cache.GuardRenderTargets(false);
+
+ current_state.draw.draw_framebuffer = framebuffer_cache.GetFramebuffer(fbkey);
SyncViewport(current_state);
return current_depth_stencil_usage = {static_cast<bool>(depth_surface), fbkey.stencil_enable};
@@ -630,6 +580,7 @@ void RasterizerOpenGL::Clear() {
clear_state.ApplyDepth();
clear_state.ApplyStencilTest();
clear_state.ApplyViewport();
+ clear_state.ApplyFramebufferState();
if (use_color) {
glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, regs.clear_color);
@@ -652,7 +603,6 @@ void RasterizerOpenGL::DrawArrays() {
auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
- ConfigureFramebuffers(state);
SyncColorMask();
SyncFragmentColorClampState();
SyncMultiSampleState();
@@ -697,16 +647,22 @@ void RasterizerOpenGL::DrawArrays() {
SetupVertexBuffer(vao);
DrawParameters params = SetupDraw();
+ texture_cache.GuardSamplers(true);
SetupShaders(params.primitive_mode);
+ texture_cache.GuardSamplers(false);
+
+ ConfigureFramebuffers(state);
buffer_cache.Unmap();
shader_program_manager->ApplyTo(state);
state.Apply();
- res_cache.SignalPreDrawCall();
+ if (texture_cache.TextureBarrier()) {
+ glTextureBarrier();
+ }
+
params.DispatchDraw();
- res_cache.SignalPostDrawCall();
accelerate_draw = AccelDraw::Disabled;
}
@@ -718,7 +674,7 @@ void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
if (!addr || !size) {
return;
}
- res_cache.FlushRegion(addr, size);
+ texture_cache.FlushRegion(addr, size);
global_cache.FlushRegion(addr, size);
}
@@ -727,23 +683,24 @@ void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
if (!addr || !size) {
return;
}
- res_cache.InvalidateRegion(addr, size);
+ texture_cache.InvalidateRegion(addr, size);
shader_cache.InvalidateRegion(addr, size);
global_cache.InvalidateRegion(addr, size);
buffer_cache.InvalidateRegion(addr, size);
}
void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
- FlushRegion(addr, size);
+ if (Settings::values.use_accurate_gpu_emulation) {
+ FlushRegion(addr, size);
+ }
InvalidateRegion(addr, size);
}
bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) {
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
MICROPROFILE_SCOPE(OpenGL_Blits);
- res_cache.FermiCopySurface(src, dst, src_rect, dst_rect);
+ texture_cache.DoFermiCopy(src, dst, copy_config);
return true;
}
@@ -755,7 +712,8 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- const auto& surface{res_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))};
+ const auto surface{
+ texture_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))};
if (!surface) {
return {};
}
@@ -771,7 +729,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
LOG_WARNING(Render_OpenGL, "Framebuffer pixel_format is different");
}
- screen_info.display_texture = surface->Texture().handle;
+ screen_info.display_texture = surface->GetTexture();
return true;
}
@@ -837,8 +795,8 @@ void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::Shade
}
}
-void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader,
- BaseBindings base_bindings) {
+TextureBufferUsage RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader,
+ BaseBindings base_bindings) {
MICROPROFILE_SCOPE(OpenGL_Texture);
const auto& gpu = system.GPU();
const auto& maxwell3d = gpu.Maxwell3D();
@@ -847,6 +805,8 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
ASSERT_MSG(base_bindings.sampler + entries.size() <= std::size(state.texture_units),
"Exceeded the number of active textures.");
+ TextureBufferUsage texture_buffer_usage{0};
+
for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
const auto& entry = entries[bindpoint];
Tegra::Texture::FullTextureInfo texture;
@@ -860,18 +820,26 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
}
const u32 current_bindpoint = base_bindings.sampler + bindpoint;
- state.texture_units[current_bindpoint].sampler = sampler_cache.GetSampler(texture.tsc);
+ auto& unit{state.texture_units[current_bindpoint]};
+ unit.sampler = sampler_cache.GetSampler(texture.tsc);
- if (Surface surface = res_cache.GetTextureSurface(texture, entry); surface) {
- state.texture_units[current_bindpoint].texture =
- surface->Texture(entry.IsArray()).handle;
- surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
+ if (const auto view{texture_cache.GetTextureSurface(texture, entry)}; view) {
+ if (view->GetSurfaceParams().IsBuffer()) {
+ // Record that this texture is a texture buffer.
+ texture_buffer_usage.set(bindpoint);
+ } else {
+ // Apply swizzle to textures that are not buffers.
+ view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
texture.tic.w_source);
+ }
+ state.texture_units[current_bindpoint].texture = view->GetTexture();
} else {
// Can occur when texture addr is null or its memory is unmapped/invalid
- state.texture_units[current_bindpoint].texture = 0;
+ unit.texture = 0;
}
}
+
+ return texture_buffer_usage;
}
void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index f7671ff5d..bf67e3a70 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -23,14 +23,15 @@
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_device.h"
+#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
#include "video_core/renderer_opengl/gl_global_cache.h"
-#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_sampler_cache.h"
#include "video_core/renderer_opengl/gl_shader_cache.h"
#include "video_core/renderer_opengl/gl_shader_decompiler.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"
#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
#include "video_core/renderer_opengl/utils.h"
namespace Core {
@@ -41,11 +42,14 @@ namespace Core::Frontend {
class EmuWindow;
}
+namespace Tegra {
+class MemoryManager;
+}
+
namespace OpenGL {
struct ScreenInfo;
struct DrawParameters;
-struct FramebufferCacheKey;
class RasterizerOpenGL : public VideoCore::RasterizerInterface {
public:
@@ -61,8 +65,7 @@ public:
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect) override;
+ const Tegra::Engines::Fermi2D::Config& copy_config) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
bool AccelerateDrawBatch(bool is_indexed) override;
@@ -95,6 +98,8 @@ private:
/**
* Configures the color and depth framebuffer states.
+ * @param must_reconfigure If true, tells the framebuffer to skip the cache and reconfigure
+ * again. Used by the texture cache to solve texception conflicts
* @param use_color_fb If true, configure color framebuffers.
* @param using_depth_fb If true, configure the depth/stencil framebuffer.
* @param preserve_contents If true, tries to preserve data from a previously used framebuffer.
@@ -118,9 +123,10 @@ private:
void SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
const Shader& shader);
- /// Configures the current textures to use for the draw command.
- void SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader,
- BaseBindings base_bindings);
+ /// Configures the current textures to use for the draw command. Returns the shader's texture
+ /// buffer usage.
+ TextureBufferUsage SetupTextures(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage,
+ const Shader& shader, BaseBindings base_bindings);
/// Syncs the viewport and depth range to match the guest state
void SyncViewport(OpenGLState& current_state);
@@ -181,10 +187,11 @@ private:
const Device device;
OpenGLState state;
- RasterizerCacheOpenGL res_cache;
+ TextureCacheOpenGL texture_cache;
ShaderCacheOpenGL shader_cache;
GlobalRegionCacheOpenGL global_cache;
SamplerCacheOpenGL sampler_cache;
+ FramebufferCacheOpenGL framebuffer_cache;
Core::System& system;
ScreenInfo& screen_info;
@@ -195,7 +202,6 @@ private:
OGLVertexArray>
vertex_array_cache;
- std::map<FramebufferCacheKey, OGLFramebuffer> framebuffer_cache;
FramebufferConfigState current_framebuffer_config_state;
std::pair<bool, bool> current_depth_stencil_usage{};
@@ -218,8 +224,6 @@ private:
void SetupShaders(GLenum primitive_mode);
- void SetupCachedFramebuffer(const FramebufferCacheKey& fbkey, OpenGLState& current_state);
-
enum class AccelDraw { Disabled, Arrays, Indexed };
AccelDraw accelerate_draw = AccelDraw::Disabled;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
deleted file mode 100644
index a7681902e..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ /dev/null
@@ -1,1362 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <optional>
-#include <glad/glad.h>
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "common/microprofile.h"
-#include "common/scope_exit.h"
-#include "core/core.h"
-#include "core/hle/kernel/process.h"
-#include "core/settings.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/memory_manager.h"
-#include "video_core/morton.h"
-#include "video_core/renderer_opengl/gl_rasterizer.h"
-#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
-#include "video_core/renderer_opengl/utils.h"
-#include "video_core/surface.h"
-#include "video_core/textures/convert.h"
-#include "video_core/textures/decoders.h"
-
-namespace OpenGL {
-
-using VideoCore::MortonSwizzle;
-using VideoCore::MortonSwizzleMode;
-using VideoCore::Surface::ComponentTypeFromDepthFormat;
-using VideoCore::Surface::ComponentTypeFromRenderTarget;
-using VideoCore::Surface::ComponentTypeFromTexture;
-using VideoCore::Surface::PixelFormatFromDepthFormat;
-using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
-using VideoCore::Surface::PixelFormatFromTextureFormat;
-using VideoCore::Surface::SurfaceTargetFromTextureType;
-
-struct FormatTuple {
- GLint internal_format;
- GLenum format;
- GLenum type;
- ComponentType component_type;
- bool compressed;
-};
-
-static void ApplyTextureDefaults(GLuint texture, u32 max_mip_level) {
- glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- glTextureParameteri(texture, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- glTextureParameteri(texture, GL_TEXTURE_MAX_LEVEL, max_mip_level - 1);
- if (max_mip_level == 1) {
- glTextureParameterf(texture, GL_TEXTURE_LOD_BIAS, 1000.0);
- }
-}
-
-void SurfaceParams::InitCacheParameters(GPUVAddr gpu_addr_) {
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
-
- gpu_addr = gpu_addr_;
- host_ptr = memory_manager.GetPointer(gpu_addr_);
- size_in_bytes = SizeInBytesRaw();
-
- if (IsPixelFormatASTC(pixel_format)) {
- // ASTC is uncompressed in software, in emulated as RGBA8
- size_in_bytes_gl = width * height * depth * 4;
- } else {
- size_in_bytes_gl = SizeInBytesGL();
- }
-}
-
-std::size_t SurfaceParams::InnerMipmapMemorySize(u32 mip_level, bool force_gl, bool layer_only,
- bool uncompressed) const {
- const u32 tile_x{GetDefaultBlockWidth(pixel_format)};
- const u32 tile_y{GetDefaultBlockHeight(pixel_format)};
- const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)};
- u32 m_depth = (layer_only ? 1U : depth);
- u32 m_width = MipWidth(mip_level);
- u32 m_height = MipHeight(mip_level);
- m_width = uncompressed ? m_width : std::max(1U, (m_width + tile_x - 1) / tile_x);
- m_height = uncompressed ? m_height : std::max(1U, (m_height + tile_y - 1) / tile_y);
- m_depth = std::max(1U, m_depth >> mip_level);
- u32 m_block_height = MipBlockHeight(mip_level);
- u32 m_block_depth = MipBlockDepth(mip_level);
- return Tegra::Texture::CalculateSize(force_gl ? false : is_tiled, bytes_per_pixel, m_width,
- m_height, m_depth, m_block_height, m_block_depth);
-}
-
-std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
- bool uncompressed) const {
- std::size_t block_size_bytes = Tegra::Texture::GetGOBSize() * block_height * block_depth;
- std::size_t size = 0;
- for (u32 i = 0; i < max_mip_level; i++) {
- size += InnerMipmapMemorySize(i, force_gl, layer_only, uncompressed);
- }
- if (!force_gl && is_tiled) {
- size = Common::AlignUp(size, block_size_bytes);
- }
- return size;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForTexture(
- const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry) {
- SurfaceParams params{};
- params.is_tiled = config.tic.IsTiled();
- params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
- params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
- params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
- params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
- params.srgb_conversion = config.tic.IsSrgbConversionEnabled();
- params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(),
- params.srgb_conversion);
-
- if (config.tsc.depth_compare_enabled) {
- // Some titles create a 'R16U' (normalized 16-bit) texture with depth_compare enabled,
- // then attempt to sample from it via a shadow sampler. Convert format to Z16 (which also
- // causes GetFormatType to properly return 'Depth' below).
- if (GetFormatType(params.pixel_format) == SurfaceType::ColorTexture) {
- switch (params.pixel_format) {
- case PixelFormat::R16S:
- case PixelFormat::R16U:
- case PixelFormat::R16F:
- params.pixel_format = PixelFormat::Z16;
- break;
- case PixelFormat::R32F:
- params.pixel_format = PixelFormat::Z32F;
- break;
- default:
- LOG_WARNING(HW_GPU, "Color texture format being used with depth compare: {}",
- static_cast<u32>(params.pixel_format));
- break;
- }
- }
- }
-
- params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
- params.type = GetFormatType(params.pixel_format);
- UNIMPLEMENTED_IF(params.type == SurfaceType::ColorTexture && config.tsc.depth_compare_enabled);
-
- params.width = Common::AlignUp(config.tic.Width(), GetCompressionFactor(params.pixel_format));
- params.height = Common::AlignUp(config.tic.Height(), GetCompressionFactor(params.pixel_format));
- if (!params.is_tiled) {
- params.pitch = config.tic.Pitch();
- }
- params.unaligned_height = config.tic.Height();
- params.target = SurfaceTargetFromTextureType(config.tic.texture_type);
- params.identity = SurfaceClass::Uploaded;
-
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- params.depth = 1;
- break;
- case SurfaceTarget::TextureCubemap:
- params.depth = config.tic.Depth() * 6;
- break;
- case SurfaceTarget::Texture3D:
- params.depth = config.tic.Depth();
- break;
- case SurfaceTarget::Texture2DArray:
- params.depth = config.tic.Depth();
- if (!entry.IsArray()) {
- // TODO(bunnei): We have seen games re-use a Texture2D as Texture2DArray with depth of
- // one, but sample the texture in the shader as if it were not an array texture. This
- // probably is valid on hardware, but we still need to write a test to confirm this. In
- // emulation, the workaround here is to continue to treat this as a Texture2D. An
- // example game that does this is Super Mario Odyssey (in Cloud Kingdom).
- ASSERT(params.depth == 1);
- params.target = SurfaceTarget::Texture2D;
- }
- break;
- case SurfaceTarget::TextureCubeArray:
- params.depth = config.tic.Depth() * 6;
- if (!entry.IsArray()) {
- ASSERT(params.depth == 6);
- params.target = SurfaceTarget::TextureCubemap;
- }
- break;
- default:
- LOG_CRITICAL(HW_GPU, "Unknown depth for target={}", static_cast<u32>(params.target));
- UNREACHABLE();
- params.depth = 1;
- break;
- }
-
- params.is_layered = SurfaceTargetIsLayered(params.target);
- params.is_array = SurfaceTargetIsArray(params.target);
- params.max_mip_level = config.tic.max_mip_level + 1;
- params.rt = {};
-
- params.InitCacheParameters(config.tic.Address());
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForFramebuffer(std::size_t index) {
- const auto& config{Core::System::GetInstance().GPU().Maxwell3D().regs.rt[index]};
- SurfaceParams params{};
-
- params.is_tiled =
- config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << config.memory_layout.block_width;
- params.block_height = 1 << config.memory_layout.block_height;
- params.block_depth = 1 << config.memory_layout.block_depth;
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
- config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- if (params.is_tiled) {
- params.width = config.width;
- } else {
- params.pitch = config.width;
- const u32 bpp = params.GetFormatBpp() / 8;
- params.width = params.pitch / bpp;
- }
- params.height = config.height;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::RenderTarget;
- params.depth = 1;
- params.max_mip_level = 1;
- params.is_layered = false;
-
- // Render target specific parameters, not used for caching
- params.rt.index = static_cast<u32>(index);
- params.rt.array_mode = config.array_mode;
- params.rt.layer_stride = config.layer_stride;
- params.rt.volume = config.volume;
- params.rt.base_layer = config.base_layer;
-
- params.InitCacheParameters(config.Address());
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForDepthBuffer(
- u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
- SurfaceParams params{};
-
- params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << std::min(block_width, 5U);
- params.block_height = 1 << std::min(block_height, 5U);
- params.block_depth = 1 << std::min(block_depth, 5U);
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromDepthFormat(format);
- params.component_type = ComponentTypeFromDepthFormat(format);
- params.type = GetFormatType(params.pixel_format);
- params.srgb_conversion = false;
- params.width = zeta_width;
- params.height = zeta_height;
- params.unaligned_height = zeta_height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::DepthBuffer;
- params.depth = 1;
- params.max_mip_level = 1;
- params.is_layered = false;
- params.rt = {};
-
- params.InitCacheParameters(zeta_address);
-
- return params;
-}
-
-/*static*/ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- SurfaceParams params{};
-
- params.is_tiled = !config.linear;
- params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0,
- params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 32U) : 0,
- params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 32U) : 0,
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
- config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- params.width = config.width;
- params.pitch = config.pitch;
- params.height = config.height;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.identity = SurfaceClass::Copy;
- params.depth = 1;
- params.max_mip_level = 1;
- params.rt = {};
-
- params.InitCacheParameters(config.Address());
-
- return params;
-}
-
-static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, false}, // ABGR8U
- {GL_RGBA8, GL_RGBA, GL_BYTE, ComponentType::SNorm, false}, // ABGR8S
- {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // ABGR8UI
- {GL_RGB8, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, ComponentType::UNorm, false}, // B5G6R5U
- {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, ComponentType::UNorm,
- false}, // A2B10G10R10U
- {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, ComponentType::UNorm, false}, // A1B5G5R5U
- {GL_R8, GL_RED, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // R8U
- {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // R8UI
- {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBA16F
- {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RGBA16U
- {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RGBA16UI
- {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, ComponentType::Float,
- false}, // R11FG11FB10F
- {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RGBA32UI
- {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT1
- {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT23
- {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT45
- {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN1
- {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXN2UNORM
- {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM
- {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // BC7U
- {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
- true}, // BC6H_UF16
- {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
- true}, // BC6H_SF16
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4
- {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8
- {GL_RGBA32F, GL_RGBA, GL_FLOAT, ComponentType::Float, false}, // RGBA32F
- {GL_RG32F, GL_RG, GL_FLOAT, ComponentType::Float, false}, // RG32F
- {GL_R32F, GL_RED, GL_FLOAT, ComponentType::Float, false}, // R32F
- {GL_R16F, GL_RED, GL_HALF_FLOAT, ComponentType::Float, false}, // R16F
- {GL_R16, GL_RED, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // R16U
- {GL_R16_SNORM, GL_RED, GL_SHORT, ComponentType::SNorm, false}, // R16S
- {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // R16UI
- {GL_R16I, GL_RED_INTEGER, GL_SHORT, ComponentType::SInt, false}, // R16I
- {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RG16
- {GL_RG16F, GL_RG, GL_HALF_FLOAT, ComponentType::Float, false}, // RG16F
- {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RG16UI
- {GL_RG16I, GL_RG_INTEGER, GL_SHORT, ComponentType::SInt, false}, // RG16I
- {GL_RG16_SNORM, GL_RG, GL_SHORT, ComponentType::SNorm, false}, // RG16S
- {GL_RGB32F, GL_RGB, GL_FLOAT, ComponentType::Float, false}, // RGB32F
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm,
- false}, // RGBA8_SRGB
- {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // RG8U
- {GL_RG8, GL_RG, GL_BYTE, ComponentType::SNorm, false}, // RG8S
- {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RG32UI
- {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // R32UI
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4
- {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8
- // Compressed sRGB formats
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT1_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT23_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // DXT45_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
- true}, // BC7U_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
- {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
- {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
-
- // Depth formats
- {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
- {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, ComponentType::UNorm,
- false}, // Z16
-
- // DepthStencil formats
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
- false}, // Z24S8
- {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
- false}, // S8Z24
- {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV,
- ComponentType::Float, false}, // Z32FS8
-}};
-
-static GLenum SurfaceTargetToGL(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return GL_TEXTURE_1D;
- case SurfaceTarget::Texture2D:
- return GL_TEXTURE_2D;
- case SurfaceTarget::Texture3D:
- return GL_TEXTURE_3D;
- case SurfaceTarget::Texture1DArray:
- return GL_TEXTURE_1D_ARRAY;
- case SurfaceTarget::Texture2DArray:
- return GL_TEXTURE_2D_ARRAY;
- case SurfaceTarget::TextureCubemap:
- return GL_TEXTURE_CUBE_MAP;
- case SurfaceTarget::TextureCubeArray:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- }
- LOG_CRITICAL(Render_OpenGL, "Unimplemented texture target={}", static_cast<u32>(target));
- UNREACHABLE();
- return {};
-}
-
-static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
- ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
- auto& format = tex_format_tuples[static_cast<unsigned int>(pixel_format)];
- ASSERT(component_type == format.component_type);
-
- return format;
-}
-
-/// Returns the discrepant array target
-constexpr GLenum GetArrayDiscrepantTarget(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return GL_TEXTURE_1D_ARRAY;
- case SurfaceTarget::Texture2D:
- return GL_TEXTURE_2D_ARRAY;
- case SurfaceTarget::Texture3D:
- return GL_NONE;
- case SurfaceTarget::Texture1DArray:
- return GL_TEXTURE_1D;
- case SurfaceTarget::Texture2DArray:
- return GL_TEXTURE_2D;
- case SurfaceTarget::TextureCubemap:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- case SurfaceTarget::TextureCubeArray:
- return GL_TEXTURE_CUBE_MAP;
- }
- return GL_NONE;
-}
-
-Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const {
- u32 actual_height{std::max(1U, unaligned_height >> mip_level)};
- if (IsPixelFormatASTC(pixel_format)) {
- // ASTC formats must stop at the ASTC block size boundary
- actual_height = Common::AlignDown(actual_height, GetASTCBlockSize(pixel_format).second);
- }
- return {0, actual_height, MipWidth(mip_level), 0};
-}
-
-void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
- std::vector<u8>& gl_buffer, u32 mip_level) {
- u32 depth = params.MipDepth(mip_level);
- if (params.target == SurfaceTarget::Texture2D) {
- // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented.
- depth = 1U;
- }
- if (params.is_layered) {
- u64 offset = params.GetMipmapLevelOffset(mip_level);
- u64 offset_gl = 0;
- const u64 layer_size = params.LayerMemorySize();
- const u64 gl_size = params.LayerSizeGL(mip_level);
- for (u32 i = 0; i < params.depth; i++) {
- MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
- params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), 1, params.tile_width_spacing,
- gl_buffer.data() + offset_gl, params.host_ptr + offset);
- offset += layer_size;
- offset_gl += gl_size;
- }
- } else {
- const u64 offset = params.GetMipmapLevelOffset(mip_level);
- MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
- params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), depth, params.tile_width_spacing,
- gl_buffer.data(), params.host_ptr + offset);
- }
-}
-
-void RasterizerCacheOpenGL::FastCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- const u32 width{std::min(src_params.width, dst_params.width)};
- const u32 height{std::min(src_params.height, dst_params.height)};
-
- glCopyImageSubData(src_surface->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0, 0,
- 0, dst_surface->Texture().handle, SurfaceTargetToGL(dst_params.target), 0, 0,
- 0, 0, width, height, 1);
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-MICROPROFILE_DEFINE(OpenGL_CopySurface, "OpenGL", "CopySurface", MP_RGB(128, 192, 64));
-void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surface& dst_surface,
- const GLuint copy_pbo_handle, const GLenum src_attachment,
- const GLenum dst_attachment,
- const std::size_t cubemap_face) {
- MICROPROFILE_SCOPE(OpenGL_CopySurface);
- ASSERT_MSG(dst_attachment == 0, "Unimplemented");
-
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
- const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
-
- const std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes);
-
- glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
- glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_COPY);
- if (source_format.compressed) {
- glGetCompressedTextureImage(src_surface->Texture().handle, src_attachment,
- static_cast<GLsizei>(src_params.size_in_bytes), nullptr);
- } else {
- glGetTextureImage(src_surface->Texture().handle, src_attachment, source_format.format,
- source_format.type, static_cast<GLsizei>(src_params.size_in_bytes),
- nullptr);
- }
- // If the new texture is bigger than the previous one, we need to fill in the rest with data
- // from the CPU.
- if (src_params.size_in_bytes < dst_params.size_in_bytes) {
- // Upload the rest of the memory.
- if (dst_params.is_tiled) {
- // TODO(Subv): We might have to de-tile the subtexture and re-tile it with the rest
- // of the data in this case. Games like Super Mario Odyssey seem to hit this case
- // when drawing; it re-uses the memory of a previous texture as a bigger framebuffer
- // but doesn't clear it beforehand, and the texture is already full of zeros.
- LOG_DEBUG(HW_GPU, "Trying to upload extra texture data from the CPU during "
- "reinterpretation but the texture is tiled.");
- }
- const std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes;
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
- glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size,
- memory_manager.GetPointer(dst_params.gpu_addr + src_params.size_in_bytes));
- }
-
- glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
-
- const GLsizei width{static_cast<GLsizei>(
- std::min(src_params.GetRect().GetWidth(), dst_params.GetRect().GetWidth()))};
- const GLsizei height{static_cast<GLsizei>(
- std::min(src_params.GetRect().GetHeight(), dst_params.GetRect().GetHeight()))};
-
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, copy_pbo_handle);
- if (dest_format.compressed) {
- LOG_CRITICAL(HW_GPU, "Compressed copy is unimplemented!");
- UNREACHABLE();
- } else {
- switch (dst_params.target) {
- case SurfaceTarget::Texture1D:
- glTextureSubImage1D(dst_surface->Texture().handle, 0, 0, width, dest_format.format,
- dest_format.type, nullptr);
- break;
- case SurfaceTarget::Texture2D:
- glTextureSubImage2D(dst_surface->Texture().handle, 0, 0, 0, width, height,
- dest_format.format, dest_format.type, nullptr);
- break;
- case SurfaceTarget::Texture3D:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureSubImage3D(dst_surface->Texture().handle, 0, 0, 0, 0, width, height,
- static_cast<GLsizei>(dst_params.depth), dest_format.format,
- dest_format.type, nullptr);
- break;
- case SurfaceTarget::TextureCubemap:
- glTextureSubImage3D(dst_surface->Texture().handle, 0, 0, 0,
- static_cast<GLint>(cubemap_face), width, height, 1,
- dest_format.format, dest_format.type, nullptr);
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(dst_params.target));
- UNREACHABLE();
- }
- glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- }
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-CachedSurface::CachedSurface(const SurfaceParams& params)
- : RasterizerCacheObject{params.host_ptr}, params{params},
- gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} {
-
- const auto optional_cpu_addr{
- Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)};
- ASSERT_MSG(optional_cpu_addr, "optional_cpu_addr is invalid");
- cpu_addr = *optional_cpu_addr;
-
- texture.Create(gl_target);
-
- // TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0)
- // alternatives. This signals a bug in those functions.
- const auto width = static_cast<GLsizei>(params.MipWidth(0));
- const auto height = static_cast<GLsizei>(params.MipHeight(0));
- memory_size = params.MemorySize();
- reinterpreted = false;
-
- const auto& format_tuple = GetFormatTuple(params.pixel_format, params.component_type);
- gl_internal_format = format_tuple.internal_format;
-
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- glTextureStorage1D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width);
- break;
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::TextureCubemap:
- glTextureStorage2D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height);
- break;
- case SurfaceTarget::Texture3D:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureStorage3D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height, params.depth);
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glTextureStorage2D(texture.handle, params.max_mip_level, format_tuple.internal_format,
- width, height);
- }
-
- ApplyTextureDefaults(texture.handle, params.max_mip_level);
-
- OpenGL::LabelGLObject(GL_TEXTURE, texture.handle, params.gpu_addr, params.IdentityString());
-}
-
-MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
-void CachedSurface::LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
- MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
- if (gl_buffer.size() < params.max_mip_level)
- gl_buffer.resize(params.max_mip_level);
- for (u32 i = 0; i < params.max_mip_level; i++)
- gl_buffer[i].resize(params.GetMipmapSizeGL(i));
- if (params.is_tiled) {
- ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
- params.block_width, static_cast<u32>(params.target));
- for (u32 i = 0; i < params.max_mip_level; i++)
- SwizzleFunc(MortonSwizzleMode::MortonToLinear, params, gl_buffer[i], i);
- } else {
- const u32 bpp = params.GetFormatBpp() / 8;
- const u32 copy_size = (params.width * bpp + GetDefaultBlockWidth(params.pixel_format) - 1) /
- GetDefaultBlockWidth(params.pixel_format);
- if (params.pitch == copy_size) {
- std::memcpy(gl_buffer[0].data(), params.host_ptr, params.size_in_bytes_gl);
- } else {
- const u32 height = (params.height + GetDefaultBlockHeight(params.pixel_format) - 1) /
- GetDefaultBlockHeight(params.pixel_format);
- const u8* start{params.host_ptr};
- u8* write_to = gl_buffer[0].data();
- for (u32 h = height; h > 0; h--) {
- std::memcpy(write_to, start, copy_size);
- start += params.pitch;
- write_to += copy_size;
- }
- }
- }
- for (u32 i = 0; i < params.max_mip_level; i++) {
- const u32 width = params.MipWidth(i);
- const u32 height = params.MipHeight(i);
- const u32 depth = params.MipDepth(i);
- if (VideoCore::Surface::IsPixelFormatASTC(params.pixel_format)) {
- // Reserve size for RGBA8 conversion
- constexpr std::size_t rgba_bpp = 4;
- gl_buffer[i].resize(std::max(gl_buffer[i].size(), width * height * depth * rgba_bpp));
- }
- Tegra::Texture::ConvertFromGuestToHost(gl_buffer[i].data(), params.pixel_format, width,
- height, depth, true, true);
- }
-}
-
-MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
-void CachedSurface::FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem) {
- MICROPROFILE_SCOPE(OpenGL_SurfaceFlush);
-
- ASSERT_MSG(!IsPixelFormatASTC(params.pixel_format), "Unimplemented");
-
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
- // The OpenGL temporary buffer needs to be big enough to hold the raw texture data
- gl_buffer[0].resize(GetSizeInBytes());
-
- const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
- const u32 align = std::clamp(params.RowAlign(0), 1U, 8U);
- glPixelStorei(GL_PACK_ALIGNMENT, align);
- glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.width));
- ASSERT(!tuple.compressed);
- glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- glGetTextureImage(texture.handle, 0, tuple.format, tuple.type,
- static_cast<GLsizei>(gl_buffer[0].size()), gl_buffer[0].data());
- glPixelStorei(GL_PACK_ROW_LENGTH, 0);
- Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width,
- params.height, params.depth, true, true);
- if (params.is_tiled) {
- ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
- params.block_width, static_cast<u32>(params.target));
-
- SwizzleFunc(MortonSwizzleMode::LinearToMorton, params, gl_buffer[0], 0);
- } else {
- const u32 bpp = params.GetFormatBpp() / 8;
- const u32 copy_size = params.width * bpp;
- if (params.pitch == copy_size) {
- std::memcpy(params.host_ptr, gl_buffer[0].data(), GetSizeInBytes());
- } else {
- u8* start{params.host_ptr};
- const u8* read_to = gl_buffer[0].data();
- for (u32 h = params.height; h > 0; h--) {
- std::memcpy(start, read_to, copy_size);
- start += params.pitch;
- read_to += copy_size;
- }
- }
- }
-}
-
-void CachedSurface::UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
- GLuint read_fb_handle, GLuint draw_fb_handle) {
- const auto& rect{params.GetRect(mip_map)};
-
- auto& gl_buffer = res_cache_tmp_mem.gl_buffer;
-
- // Load data from memory to the surface
- const auto x0 = static_cast<GLint>(rect.left);
- const auto y0 = static_cast<GLint>(rect.bottom);
- auto buffer_offset =
- static_cast<std::size_t>(static_cast<std::size_t>(y0) * params.MipWidth(mip_map) +
- static_cast<std::size_t>(x0)) *
- GetBytesPerPixel(params.pixel_format);
-
- const FormatTuple& tuple = GetFormatTuple(params.pixel_format, params.component_type);
-
- const u32 align = std::clamp(params.RowAlign(mip_map), 1U, 8U);
- glPixelStorei(GL_UNPACK_ALIGNMENT, align);
- glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.MipWidth(mip_map)));
-
- const auto image_size = static_cast<GLsizei>(params.GetMipmapSizeGL(mip_map, false));
- if (tuple.compressed) {
- switch (params.target) {
- case SurfaceTarget::Texture2D:
- glCompressedTextureSubImage2D(
- texture.handle, mip_map, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), tuple.internal_format, image_size,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture3D:
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)),
- static_cast<GLsizei>(params.MipDepth(mip_map)), tuple.internal_format, image_size,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), static_cast<GLsizei>(params.depth),
- tuple.internal_format, image_size, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::TextureCubemap: {
- const auto layer_size = static_cast<GLsizei>(params.LayerSizeGL(mip_map));
- for (std::size_t face = 0; face < params.depth; ++face) {
- glCompressedTextureSubImage3D(
- texture.handle, mip_map, 0, 0, static_cast<GLint>(face),
- static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), 1, tuple.internal_format,
- layer_size, &gl_buffer[mip_map][buffer_offset]);
- buffer_offset += layer_size;
- }
- break;
- }
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glCompressedTextureSubImage2D(
- texture.handle, mip_map, 0, 0, static_cast<GLsizei>(params.MipWidth(mip_map)),
- static_cast<GLsizei>(params.MipHeight(mip_map)), tuple.internal_format,
- static_cast<GLsizei>(params.size_in_bytes_gl), &gl_buffer[mip_map][buffer_offset]);
- }
- } else {
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- glTextureSubImage1D(texture.handle, mip_map, x0, static_cast<GLsizei>(rect.GetWidth()),
- tuple.format, tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2D:
- glTextureSubImage2D(texture.handle, mip_map, x0, y0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
- &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture3D:
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, 0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), params.MipDepth(mip_map),
- tuple.format, tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, 0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), params.depth, tuple.format,
- tuple.type, &gl_buffer[mip_map][buffer_offset]);
- break;
- case SurfaceTarget::TextureCubemap: {
- for (std::size_t face = 0; face < params.depth; ++face) {
- glTextureSubImage3D(texture.handle, mip_map, x0, y0, static_cast<GLint>(face),
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), 1, tuple.format,
- tuple.type, &gl_buffer[mip_map][buffer_offset]);
- buffer_offset += params.LayerSizeGL(mip_map);
- }
- break;
- }
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(params.target));
- UNREACHABLE();
- glTextureSubImage2D(texture.handle, mip_map, x0, y0,
- static_cast<GLsizei>(rect.GetWidth()),
- static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type,
- &gl_buffer[mip_map][buffer_offset]);
- }
- }
-
- glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
-}
-
-void CachedSurface::EnsureTextureDiscrepantView() {
- if (discrepant_view.handle != 0)
- return;
-
- const GLenum target{GetArrayDiscrepantTarget(params.target)};
- ASSERT(target != GL_NONE);
-
- const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u};
- constexpr GLuint min_layer = 0;
- constexpr GLuint min_level = 0;
-
- glGenTextures(1, &discrepant_view.handle);
- glTextureView(discrepant_view.handle, target, texture.handle, gl_internal_format, min_level,
- params.max_mip_level, min_layer, num_layers);
- ApplyTextureDefaults(discrepant_view.handle, params.max_mip_level);
- glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA,
- reinterpret_cast<const GLint*>(swizzle.data()));
-}
-
-MICROPROFILE_DEFINE(OpenGL_TextureUL, "OpenGL", "Texture Upload", MP_RGB(128, 192, 64));
-void CachedSurface::UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem,
- GLuint read_fb_handle, GLuint draw_fb_handle) {
- MICROPROFILE_SCOPE(OpenGL_TextureUL);
-
- for (u32 i = 0; i < params.max_mip_level; i++)
- UploadGLMipmapTexture(res_cache_tmp_mem, i, read_fb_handle, draw_fb_handle);
-}
-
-void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
- Tegra::Texture::SwizzleSource swizzle_y,
- Tegra::Texture::SwizzleSource swizzle_z,
- Tegra::Texture::SwizzleSource swizzle_w) {
- const GLenum new_x = MaxwellToGL::SwizzleSource(swizzle_x);
- const GLenum new_y = MaxwellToGL::SwizzleSource(swizzle_y);
- const GLenum new_z = MaxwellToGL::SwizzleSource(swizzle_z);
- const GLenum new_w = MaxwellToGL::SwizzleSource(swizzle_w);
- if (swizzle[0] == new_x && swizzle[1] == new_y && swizzle[2] == new_z && swizzle[3] == new_w) {
- return;
- }
- swizzle = {new_x, new_y, new_z, new_w};
- const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data());
- glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
- if (discrepant_view.handle != 0) {
- glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
- }
-}
-
-RasterizerCacheOpenGL::RasterizerCacheOpenGL(RasterizerOpenGL& rasterizer)
- : RasterizerCache{rasterizer} {
- read_framebuffer.Create();
- draw_framebuffer.Create();
- copy_pbo.Create();
-}
-
-Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry) {
- return GetSurface(SurfaceParams::CreateForTexture(config, entry));
-}
-
-Surface RasterizerCacheOpenGL::GetDepthBufferSurface(bool preserve_contents) {
- auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()};
- const auto& regs{gpu.regs};
-
- if (!gpu.dirty_flags.zeta_buffer) {
- return last_depth_buffer;
- }
- gpu.dirty_flags.zeta_buffer = false;
-
- if (!regs.zeta.Address() || !regs.zeta_enable) {
- return last_depth_buffer = {};
- }
-
- SurfaceParams depth_params{SurfaceParams::CreateForDepthBuffer(
- regs.zeta_width, regs.zeta_height, regs.zeta.Address(), regs.zeta.format,
- regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
- regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
-
- return last_depth_buffer = GetSurface(depth_params, preserve_contents);
-}
-
-Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool preserve_contents) {
- auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()};
- const auto& regs{gpu.regs};
-
- if (!gpu.dirty_flags.color_buffer[index]) {
- return current_color_buffers[index];
- }
- gpu.dirty_flags.color_buffer.reset(index);
-
- ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
-
- if (index >= regs.rt_control.count) {
- return current_color_buffers[index] = {};
- }
-
- if (regs.rt[index].Address() == 0 || regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
- return current_color_buffers[index] = {};
- }
-
- const SurfaceParams color_params{SurfaceParams::CreateForFramebuffer(index)};
-
- return current_color_buffers[index] = GetSurface(color_params, preserve_contents);
-}
-
-void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) {
- surface->LoadGLBuffer(temporal_memory);
- surface->UploadGLTexture(temporal_memory, read_framebuffer.handle, draw_framebuffer.handle);
- surface->MarkAsModified(false, *this);
- surface->MarkForReload(false);
-}
-
-Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) {
- if (!params.IsValid()) {
- return {};
- }
-
- // Look up surface in the cache based on address
- Surface surface{TryGet(params.host_ptr)};
- if (surface) {
- if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
- // Use the cached surface as-is unless it's not synced with memory
- if (surface->MustReload())
- LoadSurface(surface);
- return surface;
- } else if (preserve_contents) {
- // If surface parameters changed and we care about keeping the previous data, recreate
- // the surface from the old one
- Surface new_surface{RecreateSurface(surface, params)};
- Unregister(surface);
- Register(new_surface);
- if (new_surface->IsUploaded()) {
- RegisterReinterpretSurface(new_surface);
- }
- return new_surface;
- } else {
- // Delete the old surface before creating a new one to prevent collisions.
- Unregister(surface);
- }
- }
-
- // No cached surface found - get a new one
- surface = GetUncachedSurface(params);
- Register(surface);
-
- // Only load surface from memory if we care about the contents
- if (preserve_contents) {
- LoadSurface(surface);
- }
-
- return surface;
-}
-
-Surface RasterizerCacheOpenGL::GetUncachedSurface(const SurfaceParams& params) {
- Surface surface{TryGetReservedSurface(params)};
- if (!surface) {
- // No reserved surface available, create a new one and reserve it
- surface = std::make_shared<CachedSurface>(params);
- ReserveSurface(surface);
- }
- return surface;
-}
-
-void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& init_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
- auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()};
- GPUVAddr address{init_params.gpu_addr};
- const std::size_t layer_size{dst_params.LayerMemorySize()};
- for (u32 layer = 0; layer < dst_params.depth; layer++) {
- for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) {
- const GPUVAddr sub_address{address + dst_params.GetMipmapLevelOffset(mipmap)};
- const Surface& copy{TryGet(memory_manager.GetPointer(sub_address))};
- if (!copy) {
- continue;
- }
- const auto& src_params{copy->GetSurfaceParams()};
- const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))};
- const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))};
-
- glCopyImageSubData(copy->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0,
- 0, 0, dst_surface->Texture().handle,
- SurfaceTargetToGL(dst_params.target), mipmap, 0, 0, layer, width,
- height, 1);
- }
- address += layer_size;
- }
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect, GLuint read_fb_handle,
- GLuint draw_fb_handle, GLenum src_attachment = 0, GLenum dst_attachment = 0,
- std::size_t cubemap_face = 0) {
-
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- OpenGLState prev_state{OpenGLState::GetCurState()};
- SCOPE_EXIT({ prev_state.Apply(); });
-
- OpenGLState state;
- state.draw.read_framebuffer = read_fb_handle;
- state.draw.draw_framebuffer = draw_fb_handle;
- state.Apply();
-
- u32 buffers{};
-
- if (src_params.type == SurfaceType::ColorTexture) {
- switch (src_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- src_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- SurfaceTargetToGL(src_params.target),
- src_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(src_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- switch (dst_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
-
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- SurfaceTargetToGL(dst_params.target),
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(dst_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- buffers = GL_COLOR_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::Depth) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::DepthStencil) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
- }
-
- glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
- dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
- buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
-
- return true;
-}
-
-void RasterizerCacheOpenGL::FermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
- const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const Common::Rectangle<u32>& src_rect, const Common::Rectangle<u32>& dst_rect) {
-
- const auto& src_params = SurfaceParams::CreateForFermiCopySurface(src_config);
- const auto& dst_params = SurfaceParams::CreateForFermiCopySurface(dst_config);
-
- ASSERT(src_params.pixel_format == dst_params.pixel_format);
- ASSERT(src_params.block_height == dst_params.block_height);
- ASSERT(src_params.is_tiled == dst_params.is_tiled);
- ASSERT(src_params.depth == dst_params.depth);
- ASSERT(src_params.target == dst_params.target);
- ASSERT(src_params.rt.index == dst_params.rt.index);
-
- auto src_surface = GetSurface(src_params, true);
- auto dst_surface = GetSurface(dst_params, true);
-
- BlitSurface(src_surface, dst_surface, src_rect, dst_rect, read_framebuffer.handle,
- draw_framebuffer.handle);
-
- dst_surface->MarkAsModified(true, *this);
-}
-
-void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface,
- const Surface& dst_surface) {
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- // Flush enough memory for both the source and destination surface
- FlushRegion(ToCacheAddr(src_params.host_ptr),
- std::max(src_params.MemorySize(), dst_params.MemorySize()));
-
- LoadSurface(dst_surface);
-}
-
-Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
- const SurfaceParams& new_params) {
- // Verify surface is compatible for blitting
- auto old_params{old_surface->GetSurfaceParams()};
-
- // Get a new surface with the new parameters, and blit the previous surface to it
- Surface new_surface{GetUncachedSurface(new_params)};
-
- // With use_accurate_gpu_emulation enabled, do an accurate surface copy
- if (Settings::values.use_accurate_gpu_emulation) {
- AccurateCopySurface(old_surface, new_surface);
- return new_surface;
- }
-
- const bool old_compressed =
- GetFormatTuple(old_params.pixel_format, old_params.component_type).compressed;
- const bool new_compressed =
- GetFormatTuple(new_params.pixel_format, new_params.component_type).compressed;
- const bool compatible_formats =
- GetFormatBpp(old_params.pixel_format) == GetFormatBpp(new_params.pixel_format) &&
- !(old_compressed || new_compressed);
- // For compatible surfaces, we can just do a fast glCopyImageSubData-based copy
- if (old_params.target == new_params.target && old_params.depth == new_params.depth &&
- old_params.depth == 1 && compatible_formats) {
- FastCopySurface(old_surface, new_surface);
- return new_surface;
- }
-
- switch (new_params.target) {
- case SurfaceTarget::Texture2D:
- CopySurface(old_surface, new_surface, copy_pbo.handle);
- break;
- case SurfaceTarget::Texture3D:
- AccurateCopySurface(old_surface, new_surface);
- break;
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- if (compatible_formats)
- FastLayeredCopySurface(old_surface, new_surface);
- else {
- AccurateCopySurface(old_surface, new_surface);
- }
- break;
- default:
- LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
- static_cast<u32>(new_params.target));
- UNREACHABLE();
- }
-
- return new_surface;
-}
-
-Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(const u8* host_ptr) const {
- return TryGet(host_ptr);
-}
-
-void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {
- const auto& surface_reserve_key{SurfaceReserveKey::Create(surface->GetSurfaceParams())};
- surface_reserve[surface_reserve_key] = surface;
-}
-
-Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params) {
- const auto& surface_reserve_key{SurfaceReserveKey::Create(params)};
- auto search{surface_reserve.find(surface_reserve_key)};
- if (search != surface_reserve.end()) {
- return search->second;
- }
- return {};
-}
-
-static std::optional<u32> TryFindBestMipMap(std::size_t memory, const SurfaceParams params,
- u32 height) {
- for (u32 i = 0; i < params.max_mip_level; i++) {
- if (memory == params.GetMipmapSingleSize(i) && params.MipHeight(i) == height) {
- return {i};
- }
- }
- return {};
-}
-
-static std::optional<u32> TryFindBestLayer(GPUVAddr addr, const SurfaceParams params, u32 mipmap) {
- const std::size_t size{params.LayerMemorySize()};
- GPUVAddr start{params.gpu_addr + params.GetMipmapLevelOffset(mipmap)};
- for (u32 i = 0; i < params.depth; i++) {
- if (start == addr) {
- return {i};
- }
- start += size;
- }
- return {};
-}
-
-static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surface render_surface,
- const Surface blitted_surface) {
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- const std::size_t src_memory_size = src_params.size_in_bytes;
- const std::optional<u32> level =
- TryFindBestMipMap(src_memory_size, dst_params, src_params.height);
- if (level.has_value()) {
- if (src_params.width == dst_params.MipWidthGobAligned(*level) &&
- src_params.height == dst_params.MipHeight(*level) &&
- src_params.block_height >= dst_params.MipBlockHeight(*level)) {
- const std::optional<u32> slot =
- TryFindBestLayer(render_surface->GetSurfaceParams().gpu_addr, dst_params, *level);
- if (slot.has_value()) {
- glCopyImageSubData(render_surface->Texture().handle,
- SurfaceTargetToGL(src_params.target), 0, 0, 0, 0,
- blitted_surface->Texture().handle,
- SurfaceTargetToGL(dst_params.target), *level, 0, 0, *slot,
- dst_params.MipWidth(*level), dst_params.MipHeight(*level), 1);
- blitted_surface->MarkAsModified(true, cache);
- return true;
- }
- }
- }
- return false;
-}
-
-static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) {
- const VAddr bound1 = blitted_surface->GetCpuAddr() + blitted_surface->GetMemorySize();
- const VAddr bound2 = render_surface->GetCpuAddr() + render_surface->GetMemorySize();
- if (bound2 > bound1)
- return true;
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- return (dst_params.component_type != src_params.component_type);
-}
-
-static bool IsReinterpretInvalidSecond(const Surface render_surface,
- const Surface blitted_surface) {
- const auto& dst_params = blitted_surface->GetSurfaceParams();
- const auto& src_params = render_surface->GetSurfaceParams();
- return (dst_params.height > src_params.height && dst_params.width > src_params.width);
-}
-
-bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface,
- Surface intersect) {
- if (IsReinterpretInvalid(triggering_surface, intersect)) {
- Unregister(intersect);
- return false;
- }
- if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) {
- if (IsReinterpretInvalidSecond(triggering_surface, intersect)) {
- Unregister(intersect);
- return false;
- }
- FlushObject(intersect);
- FlushObject(triggering_surface);
- intersect->MarkForReload(true);
- }
- return true;
-}
-
-void RasterizerCacheOpenGL::SignalPreDrawCall() {
- if (texception && GLAD_GL_ARB_texture_barrier) {
- glTextureBarrier();
- }
- texception = false;
-}
-
-void RasterizerCacheOpenGL::SignalPostDrawCall() {
- for (u32 i = 0; i < Maxwell::NumRenderTargets; i++) {
- if (current_color_buffers[i] != nullptr) {
- Surface intersect =
- CollideOnReinterpretedSurface(current_color_buffers[i]->GetCacheAddr());
- if (intersect != nullptr) {
- PartialReinterpretSurface(current_color_buffers[i], intersect);
- texception = true;
- }
- }
- }
-}
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
deleted file mode 100644
index 6263ef3e7..000000000
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ /dev/null
@@ -1,572 +0,0 @@
-// Copyright 2018 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/bit_util.h"
-#include "common/common_types.h"
-#include "common/hash.h"
-#include "common/math_util.h"
-#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/rasterizer_cache.h"
-#include "video_core/renderer_opengl/gl_resource_manager.h"
-#include "video_core/renderer_opengl/gl_shader_gen.h"
-#include "video_core/surface.h"
-#include "video_core/textures/decoders.h"
-#include "video_core/textures/texture.h"
-
-namespace OpenGL {
-
-class CachedSurface;
-using Surface = std::shared_ptr<CachedSurface>;
-using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, Common::Rectangle<u32>>;
-
-using SurfaceTarget = VideoCore::Surface::SurfaceTarget;
-using SurfaceType = VideoCore::Surface::SurfaceType;
-using PixelFormat = VideoCore::Surface::PixelFormat;
-using ComponentType = VideoCore::Surface::ComponentType;
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-
-struct SurfaceParams {
- enum class SurfaceClass {
- Uploaded,
- RenderTarget,
- DepthBuffer,
- Copy,
- };
-
- static std::string SurfaceTargetName(SurfaceTarget target) {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return "Texture1D";
- case SurfaceTarget::Texture2D:
- return "Texture2D";
- case SurfaceTarget::Texture3D:
- return "Texture3D";
- case SurfaceTarget::Texture1DArray:
- return "Texture1DArray";
- case SurfaceTarget::Texture2DArray:
- return "Texture2DArray";
- case SurfaceTarget::TextureCubemap:
- return "TextureCubemap";
- case SurfaceTarget::TextureCubeArray:
- return "TextureCubeArray";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
- UNREACHABLE();
- return fmt::format("TextureUnknown({})", static_cast<u32>(target));
- }
- }
-
- u32 GetFormatBpp() const {
- return VideoCore::Surface::GetFormatBpp(pixel_format);
- }
-
- /// Returns the rectangle corresponding to this surface
- Common::Rectangle<u32> GetRect(u32 mip_level = 0) const;
-
- /// Returns the total size of this surface in bytes, adjusted for compression
- std::size_t SizeInBytesRaw(bool ignore_tiled = false) const {
- const u32 compression_factor{GetCompressionFactor(pixel_format)};
- const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)};
- const size_t uncompressed_size{
- Tegra::Texture::CalculateSize((ignore_tiled ? false : is_tiled), bytes_per_pixel, width,
- height, depth, block_height, block_depth)};
-
- // Divide by compression_factor^2, as height and width are factored by this
- return uncompressed_size / (compression_factor * compression_factor);
- }
-
- /// Returns the size of this surface as an OpenGL texture in bytes
- std::size_t SizeInBytesGL() const {
- return SizeInBytesRaw(true);
- }
-
- /// Returns the size of this surface as a cube face in bytes
- std::size_t SizeInBytesCubeFace() const {
- return size_in_bytes / 6;
- }
-
- /// Returns the size of this surface as an OpenGL cube face in bytes
- std::size_t SizeInBytesCubeFaceGL() const {
- return size_in_bytes_gl / 6;
- }
-
- /// Returns the exact size of memory occupied by the texture in VRAM, including mipmaps.
- std::size_t MemorySize() const {
- std::size_t size = InnerMemorySize(false, is_layered);
- if (is_layered)
- return size * depth;
- return size;
- }
-
- /// Returns true if the parameters constitute a valid rasterizer surface.
- bool IsValid() const {
- return gpu_addr && host_ptr && height && width;
- }
-
- /// Returns the exact size of the memory occupied by a layer in a texture in VRAM, including
- /// mipmaps.
- std::size_t LayerMemorySize() const {
- return InnerMemorySize(false, true);
- }
-
- /// Returns the size of a layer of this surface in OpenGL.
- std::size_t LayerSizeGL(u32 mip_level) const {
- return InnerMipmapMemorySize(mip_level, true, is_layered, false);
- }
-
- std::size_t GetMipmapSizeGL(u32 mip_level, bool ignore_compressed = true) const {
- std::size_t size = InnerMipmapMemorySize(mip_level, true, is_layered, ignore_compressed);
- if (is_layered)
- return size * depth;
- return size;
- }
-
- std::size_t GetMipmapLevelOffset(u32 mip_level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < mip_level; i++)
- offset += InnerMipmapMemorySize(i, false, is_layered);
- return offset;
- }
-
- std::size_t GetMipmapLevelOffsetGL(u32 mip_level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < mip_level; i++)
- offset += InnerMipmapMemorySize(i, true, is_layered);
- return offset;
- }
-
- std::size_t GetMipmapSingleSize(u32 mip_level) const {
- return InnerMipmapMemorySize(mip_level, false, is_layered);
- }
-
- u32 MipWidth(u32 mip_level) const {
- return std::max(1U, width >> mip_level);
- }
-
- u32 MipWidthGobAligned(u32 mip_level) const {
- return Common::AlignUp(std::max(1U, width >> mip_level), 64U * 8U / GetFormatBpp());
- }
-
- u32 MipHeight(u32 mip_level) const {
- return std::max(1U, height >> mip_level);
- }
-
- u32 MipDepth(u32 mip_level) const {
- return is_layered ? depth : std::max(1U, depth >> mip_level);
- }
-
- // Auto block resizing algorithm from:
- // https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
- u32 MipBlockHeight(u32 mip_level) const {
- if (mip_level == 0)
- return block_height;
- u32 alt_height = MipHeight(mip_level);
- u32 h = GetDefaultBlockHeight(pixel_format);
- u32 blocks_in_y = (alt_height + h - 1) / h;
- u32 bh = 16;
- while (bh > 1 && blocks_in_y <= bh * 4) {
- bh >>= 1;
- }
- return bh;
- }
-
- u32 MipBlockDepth(u32 mip_level) const {
- if (mip_level == 0) {
- return block_depth;
- }
-
- if (is_layered) {
- return 1;
- }
-
- const u32 mip_depth = MipDepth(mip_level);
- u32 bd = 32;
- while (bd > 1 && mip_depth * 2 <= bd) {
- bd >>= 1;
- }
-
- if (bd == 32) {
- const u32 bh = MipBlockHeight(mip_level);
- if (bh >= 4) {
- return 16;
- }
- }
-
- return bd;
- }
-
- u32 RowAlign(u32 mip_level) const {
- const u32 m_width = MipWidth(mip_level);
- const u32 bytes_per_pixel = GetBytesPerPixel(pixel_format);
- const u32 l2 = Common::CountTrailingZeroes32(m_width * bytes_per_pixel);
- return (1U << l2);
- }
-
- /// Creates SurfaceParams from a texture configuration
- static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry);
-
- /// Creates SurfaceParams from a framebuffer configuration
- static SurfaceParams CreateForFramebuffer(std::size_t index);
-
- /// Creates SurfaceParams for a depth buffer configuration
- static SurfaceParams CreateForDepthBuffer(
- u32 zeta_width, u32 zeta_height, GPUVAddr zeta_address, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
-
- /// Creates SurfaceParams for a Fermi2D surface copy
- static SurfaceParams CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config);
-
- /// Checks if surfaces are compatible for caching
- bool IsCompatibleSurface(const SurfaceParams& other) const {
- if (std::tie(pixel_format, type, width, height, target, depth, is_tiled) ==
- std::tie(other.pixel_format, other.type, other.width, other.height, other.target,
- other.depth, other.is_tiled)) {
- if (!is_tiled)
- return true;
- return std::tie(block_height, block_depth, tile_width_spacing) ==
- std::tie(other.block_height, other.block_depth, other.tile_width_spacing);
- }
- return false;
- }
-
- /// Initializes parameters for caching; should be called after everything has been initialized
- void InitCacheParameters(GPUVAddr gpu_addr);
-
- std::string TargetName() const {
- switch (target) {
- case SurfaceTarget::Texture1D:
- return "1D";
- case SurfaceTarget::Texture2D:
- return "2D";
- case SurfaceTarget::Texture3D:
- return "3D";
- case SurfaceTarget::Texture1DArray:
- return "1DArray";
- case SurfaceTarget::Texture2DArray:
- return "2DArray";
- case SurfaceTarget::TextureCubemap:
- return "Cube";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
- UNREACHABLE();
- return fmt::format("TUK({})", static_cast<u32>(target));
- }
- }
-
- std::string ClassName() const {
- switch (identity) {
- case SurfaceClass::Uploaded:
- return "UP";
- case SurfaceClass::RenderTarget:
- return "RT";
- case SurfaceClass::DepthBuffer:
- return "DB";
- case SurfaceClass::Copy:
- return "CP";
- default:
- LOG_CRITICAL(HW_GPU, "Unimplemented surface_class={}", static_cast<u32>(identity));
- UNREACHABLE();
- return fmt::format("CUK({})", static_cast<u32>(identity));
- }
- }
-
- std::string IdentityString() const {
- return ClassName() + '_' + TargetName() + '_' + (is_tiled ? 'T' : 'L');
- }
-
- bool is_tiled;
- u32 block_width;
- u32 block_height;
- u32 block_depth;
- u32 tile_width_spacing;
- PixelFormat pixel_format;
- ComponentType component_type;
- SurfaceType type;
- u32 width;
- u32 height;
- u32 depth;
- u32 unaligned_height;
- u32 pitch;
- SurfaceTarget target;
- SurfaceClass identity;
- u32 max_mip_level;
- bool is_layered;
- bool is_array;
- bool srgb_conversion;
- // Parameters used for caching
- u8* host_ptr;
- GPUVAddr gpu_addr;
- std::size_t size_in_bytes;
- std::size_t size_in_bytes_gl;
-
- // Render target specific parameters, not used in caching
- struct {
- u32 index;
- u32 array_mode;
- u32 volume;
- u32 layer_stride;
- u32 base_layer;
- } rt;
-
-private:
- std::size_t InnerMipmapMemorySize(u32 mip_level, bool force_gl = false, bool layer_only = false,
- bool uncompressed = false) const;
- std::size_t InnerMemorySize(bool force_gl = false, bool layer_only = false,
- bool uncompressed = false) const;
-};
-
-}; // namespace OpenGL
-
- /// Hashable variation of SurfaceParams, used as a key in the surface cache
-struct SurfaceReserveKey : Common::HashableStruct<OpenGL::SurfaceParams> {
- static SurfaceReserveKey Create(const OpenGL::SurfaceParams& params) {
- SurfaceReserveKey res;
- res.state = params;
- res.state.identity = {}; // Ignore the origin of the texture
- res.state.gpu_addr = {}; // Ignore GPU vaddr in caching
- res.state.rt = {}; // Ignore rt config in caching
- return res;
- }
-};
-namespace std {
-template <>
-struct hash<SurfaceReserveKey> {
- std::size_t operator()(const SurfaceReserveKey& k) const {
- return k.Hash();
- }
-};
-} // namespace std
-
-namespace OpenGL {
-
-class RasterizerOpenGL;
-
- // This is used to store big temporary buffers
- // instead of creating/destroying them all the time.
-struct RasterizerTemporaryMemory {
- std::vector<std::vector<u8>> gl_buffer;
-};
-
-class CachedSurface final : public RasterizerCacheObject {
-public:
- explicit CachedSurface(const SurfaceParams& params);
-
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
- std::size_t GetSizeInBytes() const override {
- return cached_size_in_bytes;
- }
-
- std::size_t GetMemorySize() const {
- return memory_size;
- }
-
- const OGLTexture& Texture() const {
- return texture;
- }
-
- const OGLTexture& Texture(bool as_array) {
- if (params.is_array == as_array) {
- return texture;
- } else {
- EnsureTextureDiscrepantView();
- return discrepant_view;
- }
- }
-
- GLenum Target() const {
- return gl_target;
- }
-
- const SurfaceParams& GetSurfaceParams() const {
- return params;
- }
-
- // Read/Write data in Switch memory to/from gl_buffer
- void LoadGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);
- void FlushGLBuffer(RasterizerTemporaryMemory& res_cache_tmp_mem);
-
- // Upload data in gl_buffer to this surface's texture
- void UploadGLTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, GLuint read_fb_handle,
- GLuint draw_fb_handle);
-
- void UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
- Tegra::Texture::SwizzleSource swizzle_y,
- Tegra::Texture::SwizzleSource swizzle_z,
- Tegra::Texture::SwizzleSource swizzle_w);
-
- void MarkReinterpreted() {
- reinterpreted = true;
- }
-
- bool IsReinterpreted() const {
- return reinterpreted;
- }
-
- void MarkForReload(bool reload) {
- must_reload = reload;
- }
-
- bool MustReload() const {
- return must_reload;
- }
-
- bool IsUploaded() const {
- return params.identity == SurfaceParams::SurfaceClass::Uploaded;
- }
-
-private:
- void UploadGLMipmapTexture(RasterizerTemporaryMemory& res_cache_tmp_mem, u32 mip_map,
- GLuint read_fb_handle, GLuint draw_fb_handle);
-
- void EnsureTextureDiscrepantView();
-
- OGLTexture texture;
- OGLTexture discrepant_view;
- SurfaceParams params{};
- GLenum gl_target{};
- GLenum gl_internal_format{};
- std::size_t cached_size_in_bytes{};
- std::array<GLenum, 4> swizzle{GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA};
- std::size_t memory_size;
- bool reinterpreted = false;
- bool must_reload = false;
- VAddr cpu_addr{};
-};
-
-class RasterizerCacheOpenGL final : public RasterizerCache<Surface> {
-public:
- explicit RasterizerCacheOpenGL(RasterizerOpenGL& rasterizer);
-
- /// Get a surface based on the texture configuration
- Surface GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
- const GLShader::SamplerEntry& entry);
-
- /// Get the depth surface based on the framebuffer configuration
- Surface GetDepthBufferSurface(bool preserve_contents);
-
- /// Get the color surface based on the framebuffer configuration and the specified render target
- Surface GetColorBufferSurface(std::size_t index, bool preserve_contents);
-
- /// Tries to find a framebuffer using the provided CPU address
- Surface TryFindFramebufferSurface(const u8* host_ptr) const;
-
- /// Copies the contents of one surface to another
- void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
- const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const Common::Rectangle<u32>& src_rect,
- const Common::Rectangle<u32>& dst_rect);
-
- void SignalPreDrawCall();
- void SignalPostDrawCall();
-
-protected:
- void FlushObjectInner(const Surface& object) override {
- object->FlushGLBuffer(temporal_memory);
- }
-
-private:
- void LoadSurface(const Surface& surface);
- Surface GetSurface(const SurfaceParams& params, bool preserve_contents = true);
-
- /// Gets an uncached surface, creating it if need be
- Surface GetUncachedSurface(const SurfaceParams& params);
-
- /// Recreates a surface with new parameters
- Surface RecreateSurface(const Surface& old_surface, const SurfaceParams& new_params);
-
- /// Reserves a unique surface that can be reused later
- void ReserveSurface(const Surface& surface);
-
- /// Tries to get a reserved surface for the specified parameters
- Surface TryGetReservedSurface(const SurfaceParams& params);
-
- // Partially reinterprets a surface based on a triggering_surface that collides with it.
- // Returns true if the reinterpret was successful, false otherwise.
- bool PartialReinterpretSurface(Surface triggering_surface, Surface intersect);
-
- /// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data
- void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void FastCopySurface(const Surface& src_surface, const Surface& dst_surface);
- void CopySurface(const Surface& src_surface, const Surface& dst_surface,
- const GLuint copy_pbo_handle, const GLenum src_attachment = 0,
- const GLenum dst_attachment = 0, const std::size_t cubemap_face = 0);
-
- /// The surface reserve is a "backup" cache; this is where we put unique surfaces that have
- /// previously been used. This prevents surfaces from being constantly created and
- /// destroyed when they are used with different surface parameters.
- std::unordered_map<SurfaceReserveKey, Surface> surface_reserve;
-
- OGLFramebuffer read_framebuffer;
- OGLFramebuffer draw_framebuffer;
-
- bool texception = false;
-
- /// Use a Pixel Buffer Object to download the previous texture and then upload it to the new one
- /// using the new format.
- OGLBuffer copy_pbo;
-
- std::array<Surface, Maxwell::NumRenderTargets> last_color_buffers;
- std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers;
- Surface last_depth_buffer;
-
- RasterizerTemporaryMemory temporal_memory;
-
- using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>;
- using SurfaceInterval = typename SurfaceIntervalCache::interval_type;
-
- static auto GetReinterpretInterval(const Surface& object) {
- return SurfaceInterval::right_open(object->GetCacheAddr() + 1,
- object->GetCacheAddr() + object->GetMemorySize() - 1);
- }
-
- // Reinterpreted surfaces are very fragil as the game may keep rendering into them.
- SurfaceIntervalCache reinterpreted_surfaces;
-
- void RegisterReinterpretSurface(Surface reinterpret_surface) {
- auto interval = GetReinterpretInterval(reinterpret_surface);
- reinterpreted_surfaces.insert({interval, reinterpret_surface});
- reinterpret_surface->MarkReinterpreted();
- }
-
- Surface CollideOnReinterpretedSurface(CacheAddr addr) const {
- const SurfaceInterval interval{addr};
- for (auto& pair :
- boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) {
- return pair.second;
- }
- return nullptr;
- }
-
- void Register(const Surface& object) override {
- RasterizerCache<Surface>::Register(object);
- }
-
- /// Unregisters an object from the cache
- void Unregister(const Surface& object) override {
- if (object->IsReinterpreted()) {
- auto interval = GetReinterpretInterval(object);
- reinterpreted_surfaces.erase(interval);
- }
- RasterizerCache<Surface>::Unregister(object);
- }
-};
-
-} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.cpp b/src/video_core/renderer_opengl/gl_resource_manager.cpp
index bfe666a73..5c96c1d46 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_resource_manager.cpp
@@ -33,6 +33,24 @@ void OGLTexture::Release() {
handle = 0;
}
+void OGLTextureView::Create() {
+ if (handle != 0)
+ return;
+
+ MICROPROFILE_SCOPE(OpenGL_ResourceCreation);
+ glGenTextures(1, &handle);
+}
+
+void OGLTextureView::Release() {
+ if (handle == 0)
+ return;
+
+ MICROPROFILE_SCOPE(OpenGL_ResourceDeletion);
+ glDeleteTextures(1, &handle);
+ OpenGLState::GetCurState().UnbindTexture(handle).Apply();
+ handle = 0;
+}
+
void OGLSampler::Create() {
if (handle != 0)
return;
@@ -130,6 +148,12 @@ void OGLBuffer::Release() {
handle = 0;
}
+void OGLBuffer::MakeStreamCopy(std::size_t buffer_size) {
+ ASSERT_OR_EXECUTE((handle != 0 && buffer_size != 0), { return; });
+
+ glNamedBufferData(handle, buffer_size, nullptr, GL_STREAM_COPY);
+}
+
void OGLSync::Create() {
if (handle != 0)
return;
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index fbb93ee49..3a85a1d4c 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -36,6 +36,31 @@ public:
GLuint handle = 0;
};
+class OGLTextureView : private NonCopyable {
+public:
+ OGLTextureView() = default;
+
+ OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
+
+ ~OGLTextureView() {
+ Release();
+ }
+
+ OGLTextureView& operator=(OGLTextureView&& o) noexcept {
+ Release();
+ handle = std::exchange(o.handle, 0);
+ return *this;
+ }
+
+ /// Creates a new internal OpenGL resource and stores the handle
+ void Create();
+
+ /// Deletes the internal OpenGL resource
+ void Release();
+
+ GLuint handle = 0;
+};
+
class OGLSampler : private NonCopyable {
public:
OGLSampler() = default;
@@ -161,6 +186,9 @@ public:
/// Deletes the internal OpenGL resource
void Release();
+ /// Converts the buffer into a stream copy buffer with a fixed size
+ void MakeStreamCopy(std::size_t buffer_size);
+
GLuint handle = 0;
};
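A minimal usage sketch of the two additions above (OGLTextureView and OGLBuffer::MakeStreamCopy), assuming a current OpenGL 4.5 context with glad already initialized; the 4 MiB size is arbitrary:

```cpp
#include "video_core/renderer_opengl/gl_resource_manager.h"

namespace OpenGL {

void ResourceManagerSketch() {
    // OGLTextureView follows the same RAII pattern as the other wrappers: Create()
    // allocates a GL name, Release()/the destructor frees it, and moves transfer ownership.
    OGLTextureView view;
    view.Create();

    // MakeStreamCopy turns an already created buffer into GL_STREAM_COPY storage of a
    // fixed size; it asserts that both the handle and the size are non-zero.
    OGLBuffer buffer;
    buffer.Create();
    buffer.MakeStreamCopy(4 * 1024 * 1024);
} // Both handles are released here by the destructors.

} // namespace OpenGL
```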
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index ac8a9e6b7..f9b2b03a0 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -103,15 +103,22 @@ constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLen
/// Calculates the size of a program stream
std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
constexpr std::size_t start_offset = 10;
+ // This is the encoded version of BRA that jumps to itself. All Nvidia
+ // shaders end with one.
+ constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
+ constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
std::size_t offset = start_offset;
std::size_t size = start_offset * sizeof(u64);
while (offset < program.size()) {
const u64 instruction = program[offset];
if (!IsSchedInstruction(offset, start_offset)) {
- if (instruction == 0 || (instruction >> 52) == 0x50b) {
+ if ((instruction & mask) == self_jumping_branch) {
// End on Maxwell's "nop" instruction
break;
}
+ if (instruction == 0) {
+ break;
+ }
}
size += sizeof(u64);
offset++;
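The new termination test can be checked in isolation. A small sketch with the constants from this hunk; the last value is just an arbitrary non-terminating word:

```cpp
#include <cstdint>

// Constants copied from CalculateProgramSize above.
constexpr std::uint64_t self_jumping_branch = 0xE2400FFFFF07000FULL;
constexpr std::uint64_t mask = 0xFFFFFFFFFF7FFFFFULL;

// A word ends the program if it is the self-jumping BRA (ignoring the bit cleared by the
// mask) or an all-zero instruction.
constexpr bool IsProgramEnd(std::uint64_t instruction) {
    return (instruction & mask) == self_jumping_branch || instruction == 0;
}

static_assert(IsProgramEnd(0xE2400FFFFF07000FULL));  // the canonical terminator
static_assert(IsProgramEnd(0xE2400FFFFF87000FULL));  // same BRA with the masked-out bit set
static_assert(!IsProgramEnd(0xE24000000007000FULL)); // arbitrary non-terminating word
```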
@@ -168,8 +175,12 @@ GLShader::ProgramResult CreateProgram(const Device& device, Maxwell::ShaderProgr
}
CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEntries& entries,
- Maxwell::ShaderProgram program_type, BaseBindings base_bindings,
- GLenum primitive_mode, bool hint_retrievable = false) {
+ Maxwell::ShaderProgram program_type, const ProgramVariant& variant,
+ bool hint_retrievable = false) {
+ auto base_bindings{variant.base_bindings};
+ const auto primitive_mode{variant.primitive_mode};
+ const auto texture_buffer_usage{variant.texture_buffer_usage};
+
std::string source = "#version 430 core\n"
"#extension GL_ARB_separate_shader_objects : enable\n\n";
source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++);
@@ -186,6 +197,18 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn
source += fmt::format("#define SAMPLER_BINDING_{} {}\n", sampler.GetIndex(),
base_bindings.sampler++);
}
+ for (const auto& image : entries.images) {
+ source +=
+ fmt::format("#define IMAGE_BINDING_{} {}\n", image.GetIndex(), base_bindings.image++);
+ }
+
+ // Transform 1D textures into texture buffer samplers by declaring their preprocessor macros.
+ for (std::size_t i = 0; i < texture_buffer_usage.size(); ++i) {
+ if (!texture_buffer_usage.test(i)) {
+ continue;
+ }
+ source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
+ }
if (program_type == Maxwell::ShaderProgram::Geometry) {
const auto [glsl_topology, debug_name, max_vertices] =
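To make the effect of texture_buffer_usage concrete, this standalone sketch rebuilds only the per-variant preprocessor block the same way as the loop above (each define carries its own trailing newline); the bit pattern is arbitrary:

```cpp
#include <bitset>
#include <cstddef>
#include <string>

#include <fmt/format.h>

// Builds the SAMPLER_*_IS_BUFFER block for one program variant.
std::string BuildTextureBufferDefines(const std::bitset<64>& texture_buffer_usage) {
    std::string source;
    for (std::size_t i = 0; i < texture_buffer_usage.size(); ++i) {
        if (!texture_buffer_usage.test(i)) {
            continue;
        }
        source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
    }
    return source;
}

// BuildTextureBufferDefines(0b1001) returns:
//   #define SAMPLER_0_IS_BUFFER
//   #define SAMPLER_3_IS_BUFFER
```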
@@ -221,60 +244,51 @@ std::set<GLenum> GetSupportedFormats() {
} // Anonymous namespace
-CachedShader::CachedShader(const Device& device, VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr},
- unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
- precompiled_programs{precompiled_programs} {
- const std::size_t code_size{CalculateProgramSize(program_code)};
- const std::size_t code_size_b{program_code_b.empty() ? 0
- : CalculateProgramSize(program_code_b)};
- GLShader::ProgramResult program_result{
- CreateProgram(device, program_type, program_code, program_code_b)};
- if (program_result.first.empty()) {
+CachedShader::CachedShader(const ShaderParameters& params, Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result)
+ : RasterizerCacheObject{params.host_ptr}, host_ptr{params.host_ptr}, cpu_addr{params.cpu_addr},
+ unique_identifier{params.unique_identifier}, program_type{program_type},
+ disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs},
+ entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {}
+
+Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ ProgramCode&& program_code,
+ ProgramCode&& program_code_b) {
+ const auto code_size{CalculateProgramSize(program_code)};
+ const auto code_size_b{CalculateProgramSize(program_code_b)};
+ auto result{CreateProgram(params.device, program_type, program_code, program_code_b)};
+ if (result.first.empty()) {
// TODO(Rodrigo): Unimplemented shader stages hit here, avoid using these for now
- return;
+ return {};
}
- code = program_result.first;
- entries = program_result.second;
- shader_length = entries.shader_length;
+ params.disk_cache.SaveRaw(ShaderDiskCacheRaw(
+ params.unique_identifier, program_type, static_cast<u32>(code_size / sizeof(u64)),
+ static_cast<u32>(code_size_b / sizeof(u64)), std::move(program_code),
+ std::move(program_code_b)));
- const ShaderDiskCacheRaw raw(unique_identifier, program_type,
- static_cast<u32>(code_size / sizeof(u64)),
- static_cast<u32>(code_size_b / sizeof(u64)),
- std::move(program_code), std::move(program_code_b));
- disk_cache.SaveRaw(raw);
+ return std::shared_ptr<CachedShader>(new CachedShader(params, program_type, std::move(result)));
}
-CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier},
- program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{
- precompiled_programs} {
- code = std::move(result.first);
- entries = result.second;
- shader_length = entries.shader_length;
+Shader CachedShader::CreateStageFromCache(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result) {
+ return std::shared_ptr<CachedShader>(new CachedShader(params, program_type, std::move(result)));
}
-std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive_mode,
- BaseBindings base_bindings) {
+std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) {
GLuint handle{};
if (program_type == Maxwell::ShaderProgram::Geometry) {
- handle = GetGeometryShader(primitive_mode, base_bindings);
+ handle = GetGeometryShader(variant);
} else {
- const auto [entry, is_cache_miss] = programs.try_emplace(base_bindings);
+ const auto [entry, is_cache_miss] = programs.try_emplace(variant);
auto& program = entry->second;
if (is_cache_miss) {
- program = TryLoadProgram(primitive_mode, base_bindings);
+ program = TryLoadProgram(variant);
if (!program) {
- program =
- SpecializeShader(code, entries, program_type, base_bindings, primitive_mode);
- disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
+ program = SpecializeShader(code, entries, program_type, variant);
+ disk_cache.SaveUsage(GetUsage(variant));
}
LabelGLObject(GL_PROGRAM, program->handle, cpu_addr);
@@ -283,6 +297,7 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive
handle = program->handle;
}
+ auto base_bindings{variant.base_bindings};
base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size()) + RESERVED_UBOS;
base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size());
base_bindings.sampler += static_cast<u32>(entries.samplers.size());
@@ -290,43 +305,42 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive
return {handle, base_bindings};
}
-GLuint CachedShader::GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings) {
- const auto [entry, is_cache_miss] = geometry_programs.try_emplace(base_bindings);
+GLuint CachedShader::GetGeometryShader(const ProgramVariant& variant) {
+ const auto [entry, is_cache_miss] = geometry_programs.try_emplace(variant);
auto& programs = entry->second;
- switch (primitive_mode) {
+ switch (variant.primitive_mode) {
case GL_POINTS:
- return LazyGeometryProgram(programs.points, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.points, variant);
case GL_LINES:
case GL_LINE_STRIP:
- return LazyGeometryProgram(programs.lines, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.lines, variant);
case GL_LINES_ADJACENCY:
case GL_LINE_STRIP_ADJACENCY:
- return LazyGeometryProgram(programs.lines_adjacency, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.lines_adjacency, variant);
case GL_TRIANGLES:
case GL_TRIANGLE_STRIP:
case GL_TRIANGLE_FAN:
- return LazyGeometryProgram(programs.triangles, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.triangles, variant);
case GL_TRIANGLES_ADJACENCY:
case GL_TRIANGLE_STRIP_ADJACENCY:
- return LazyGeometryProgram(programs.triangles_adjacency, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.triangles_adjacency, variant);
default:
UNREACHABLE_MSG("Unknown primitive mode.");
- return LazyGeometryProgram(programs.points, base_bindings, primitive_mode);
+ return LazyGeometryProgram(programs.points, variant);
}
}
-GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBindings base_bindings,
- GLenum primitive_mode) {
+GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program,
+ const ProgramVariant& variant) {
if (target_program) {
return target_program->handle;
}
- const auto [glsl_name, debug_name, vertices] = GetPrimitiveDescription(primitive_mode);
- target_program = TryLoadProgram(primitive_mode, base_bindings);
+ const auto [glsl_name, debug_name, vertices] = GetPrimitiveDescription(variant.primitive_mode);
+ target_program = TryLoadProgram(variant);
if (!target_program) {
- target_program =
- SpecializeShader(code, entries, program_type, base_bindings, primitive_mode);
- disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
+ target_program = SpecializeShader(code, entries, program_type, variant);
+ disk_cache.SaveUsage(GetUsage(variant));
}
LabelGLObject(GL_PROGRAM, target_program->handle, cpu_addr, debug_name);
@@ -334,18 +348,19 @@ GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBind
return target_program->handle;
};
-CachedProgram CachedShader::TryLoadProgram(GLenum primitive_mode,
- BaseBindings base_bindings) const {
- const auto found = precompiled_programs.find(GetUsage(primitive_mode, base_bindings));
+CachedProgram CachedShader::TryLoadProgram(const ProgramVariant& variant) const {
+ const auto found = precompiled_programs.find(GetUsage(variant));
if (found == precompiled_programs.end()) {
return {};
}
return found->second;
}
-ShaderDiskCacheUsage CachedShader::GetUsage(GLenum primitive_mode,
- BaseBindings base_bindings) const {
- return {unique_identifier, base_bindings, primitive_mode};
+ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant) const {
+ ShaderDiskCacheUsage usage;
+ usage.unique_identifier = unique_identifier;
+ usage.variant = variant;
+ return usage;
}
ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::System& system,
@@ -411,8 +426,7 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
}
if (!shader) {
shader = SpecializeShader(unspecialized.code, unspecialized.entries,
- unspecialized.program_type, usage.bindings,
- usage.primitive, true);
+ unspecialized.program_type, usage.variant, true);
}
std::scoped_lock lock(mutex);
@@ -570,18 +584,17 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
memory_manager.GetPointer(program_addr_b));
}
- const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b);
- const VAddr cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};
+ const auto unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b);
+ const auto cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};
+ const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr,
+ host_ptr, unique_identifier};
+
const auto found = precompiled_shaders.find(unique_identifier);
- if (found != precompiled_shaders.end()) {
- // Create a shader from the cache
- shader = std::make_shared<CachedShader>(cpu_addr, unique_identifier, program, disk_cache,
- precompiled_programs, found->second, host_ptr);
+ if (found == precompiled_shaders.end()) {
+ shader = CachedShader::CreateStageFromMemory(params, program, std::move(program_code),
+ std::move(program_code_b));
} else {
- // Create a shader from guest memory
- shader = std::make_shared<CachedShader>(
- device, cpu_addr, unique_identifier, program, disk_cache, precompiled_programs,
- std::move(program_code), std::move(program_code_b), host_ptr);
+ shader = CachedShader::CreateStageFromCache(params, program, found->second);
}
Register(shader);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 09bd0761d..bbb53cdf4 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -6,6 +6,7 @@
#include <array>
#include <atomic>
+#include <bitset>
#include <memory>
#include <set>
#include <tuple>
@@ -41,17 +42,24 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using PrecompiledPrograms = std::unordered_map<ShaderDiskCacheUsage, CachedProgram>;
using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>;
+struct ShaderParameters {
+ ShaderDiskCacheOpenGL& disk_cache;
+ const PrecompiledPrograms& precompiled_programs;
+ const Device& device;
+ VAddr cpu_addr;
+ u8* host_ptr;
+ u64 unique_identifier;
+};
+
class CachedShader final : public RasterizerCacheObject {
public:
- explicit CachedShader(const Device& device, VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr);
+ static Shader CreateStageFromMemory(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ ProgramCode&& program_code, ProgramCode&& program_code_b);
- explicit CachedShader(VAddr cpu_addr, u64 unique_identifier,
- Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
- const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result, u8* host_ptr);
+ static Shader CreateStageFromCache(const ShaderParameters& params,
+ Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result);
VAddr GetCpuAddr() const override {
return cpu_addr;
@@ -67,10 +75,12 @@ public:
}
/// Gets the GL program handle for the shader
- std::tuple<GLuint, BaseBindings> GetProgramHandle(GLenum primitive_mode,
- BaseBindings base_bindings);
+ std::tuple<GLuint, BaseBindings> GetProgramHandle(const ProgramVariant& variant);
private:
+ explicit CachedShader(const ShaderParameters& params, Maxwell::ShaderProgram program_type,
+ GLShader::ProgramResult result);
+
// Geometry programs. These are needed because GLSL needs an input topology but it's not
// declared by the hardware. Workaround this issue by generating a different shader per input
// topology class.
@@ -82,15 +92,14 @@ private:
CachedProgram triangles_adjacency;
};
- GLuint GetGeometryShader(GLenum primitive_mode, BaseBindings base_bindings);
+ GLuint GetGeometryShader(const ProgramVariant& variant);
/// Generates a geometry shader or returns one that already exists.
- GLuint LazyGeometryProgram(CachedProgram& target_program, BaseBindings base_bindings,
- GLenum primitive_mode);
+ GLuint LazyGeometryProgram(CachedProgram& target_program, const ProgramVariant& variant);
- CachedProgram TryLoadProgram(GLenum primitive_mode, BaseBindings base_bindings) const;
+ CachedProgram TryLoadProgram(const ProgramVariant& variant) const;
- ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const;
+ ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant) const;
u8* host_ptr{};
VAddr cpu_addr{};
@@ -99,13 +108,12 @@ private:
ShaderDiskCacheOpenGL& disk_cache;
const PrecompiledPrograms& precompiled_programs;
- std::size_t shader_length{};
GLShader::ShaderEntries entries;
-
std::string code;
+ std::size_t shader_length{};
- std::unordered_map<BaseBindings, CachedProgram> programs;
- std::unordered_map<BaseBindings, GeometryPrograms> geometry_programs;
+ std::unordered_map<ProgramVariant, CachedProgram> programs;
+ std::unordered_map<ProgramVariant, GeometryPrograms> geometry_programs;
std::unordered_map<u32, GLuint> cbuf_resource_cache;
std::unordered_map<u32, GLuint> gmem_resource_cache;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 7dc2e0560..5f2f1510c 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -180,6 +180,7 @@ public:
DeclareGlobalMemory();
DeclareSamplers();
DeclarePhysicalAttributeReader();
+ DeclareImages();
code.AddLine("void execute_{}() {{", suffix);
++code.scope;
@@ -234,6 +235,9 @@ public:
for (const auto& sampler : ir.GetSamplers()) {
entries.samplers.emplace_back(sampler);
}
+ for (const auto& image : ir.GetImages()) {
+ entries.images.emplace_back(image);
+ }
for (const auto& gmem_pair : ir.GetGlobalMemory()) {
const auto& [base, usage] = gmem_pair;
entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset,
@@ -453,9 +457,13 @@ private:
void DeclareSamplers() {
const auto& samplers = ir.GetSamplers();
for (const auto& sampler : samplers) {
- std::string sampler_type = [&sampler] {
+ const std::string name{GetSampler(sampler)};
+ const std::string description{"layout (binding = SAMPLER_BINDING_" +
+ std::to_string(sampler.GetIndex()) + ") uniform"};
+ std::string sampler_type = [&]() {
switch (sampler.GetType()) {
case Tegra::Shader::TextureType::Texture1D:
+ // Special cased, read below.
return "sampler1D";
case Tegra::Shader::TextureType::Texture2D:
return "sampler2D";
@@ -475,8 +483,19 @@ private:
sampler_type += "Shadow";
}
- code.AddLine("layout (binding = SAMPLER_BINDING_{}) uniform {} {};", sampler.GetIndex(),
- sampler_type, GetSampler(sampler));
+ if (sampler.GetType() == Tegra::Shader::TextureType::Texture1D) {
+ // 1D textures can be aliased to texture buffers, so hide both declarations behind a
+ // preprocessor flag and pick one or the other from the GPU state. This has to be
+ // done because shaders don't carry enough information to determine the texture type.
+ EmitIfdefIsBuffer(sampler);
+ code.AddLine("{} samplerBuffer {};", description, name);
+ code.AddLine("#else");
+ code.AddLine("{} {} {};", description, sampler_type, name);
+ code.AddLine("#endif");
+ } else {
+ // The other texture types (2D, 3D and cubes) don't have this issue.
+ code.AddLine("{} {} {};", description, sampler_type, name);
+ }
}
if (!samplers.empty()) {
code.AddNewLine();
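For a hypothetical 1D sampler with index 2, the declaration pair emitted by the branch above looks roughly like the following; the "fs" stage suffix is a stand-in, and which branch survives is decided per program variant by the SAMPLER_2_IS_BUFFER define:

```cpp
// Illustrative GLSL only; the identifier follows GetDeclarationWithSuffix and the
// binding macro is injected by SpecializeShader.
constexpr const char* example_1d_sampler_declaration = R"(
#ifdef SAMPLER_2_IS_BUFFER
layout (binding = SAMPLER_BINDING_2) uniform samplerBuffer sampler_2_fs;
#else
layout (binding = SAMPLER_BINDING_2) uniform sampler1D sampler_2_fs;
#endif
)";
```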
@@ -516,6 +535,37 @@ private:
code.AddNewLine();
}
+ void DeclareImages() {
+ const auto& images{ir.GetImages()};
+ for (const auto& image : images) {
+ const std::string image_type = [&]() {
+ switch (image.GetType()) {
+ case Tegra::Shader::ImageType::Texture1D:
+ return "image1D";
+ case Tegra::Shader::ImageType::TextureBuffer:
+ return "imageBuffer";
+ case Tegra::Shader::ImageType::Texture1DArray:
+ return "image1DArray";
+ case Tegra::Shader::ImageType::Texture2D:
+ return "image2D";
+ case Tegra::Shader::ImageType::Texture2DArray:
+ return "image2DArray";
+ case Tegra::Shader::ImageType::Texture3D:
+ return "image3D";
+ default:
+ UNREACHABLE();
+ return "image1D";
+ }
+ }();
+ code.AddLine("layout (binding = IMAGE_BINDING_{}) coherent volatile writeonly uniform "
+ "{} {};",
+ image.GetIndex(), image_type, GetImage(image));
+ }
+ if (!images.empty()) {
+ code.AddNewLine();
+ }
+ }
+
void VisitBlock(const NodeBlock& bb) {
for (const auto& node : bb) {
if (const std::string expr = Visit(node); !expr.empty()) {
@@ -1439,13 +1489,61 @@ private:
else if (next < count)
expr += ", ";
}
+
+ // Store a copy of the expression without the lod to be used with texture buffers
+ std::string expr_buffer = expr;
+
if (meta->lod) {
expr += ", ";
expr += CastOperand(Visit(meta->lod), Type::Int);
}
expr += ')';
+ expr += GetSwizzle(meta->element);
- return expr + GetSwizzle(meta->element);
+ expr_buffer += ')';
+ expr_buffer += GetSwizzle(meta->element);
+
+ const std::string tmp{code.GenerateTemporary()};
+ EmitIfdefIsBuffer(meta->sampler);
+ code.AddLine("float {} = {};", tmp, expr_buffer);
+ code.AddLine("#else");
+ code.AddLine("float {} = {};", tmp, expr);
+ code.AddLine("#endif");
+
+ return tmp;
+ }
+
+ std::string ImageStore(Operation operation) {
+ constexpr std::array<const char*, 4> constructors{"int(", "ivec2(", "ivec3(", "ivec4("};
+ const auto meta{std::get<MetaImage>(operation.GetMeta())};
+
+ std::string expr = "imageStore(";
+ expr += GetImage(meta.image);
+ expr += ", ";
+
+ const std::size_t coords_count{operation.GetOperandsCount()};
+ expr += constructors.at(coords_count - 1);
+ for (std::size_t i = 0; i < coords_count; ++i) {
+ expr += VisitOperand(operation, i, Type::Int);
+ if (i + 1 < coords_count) {
+ expr += ", ";
+ }
+ }
+ expr += "), ";
+
+ const std::size_t values_count{meta.values.size()};
+ UNIMPLEMENTED_IF(values_count != 4);
+ expr += "vec4(";
+ for (std::size_t i = 0; i < values_count; ++i) {
+ expr += Visit(meta.values.at(i));
+ if (i + 1 < values_count) {
+ expr += ", ";
+ }
+ }
+ expr += "));";
+
+ code.AddLine(expr);
+ return {};
}
std::string Branch(Operation operation) {
@@ -1688,6 +1786,8 @@ private:
&GLSLDecompiler::TextureQueryLod,
&GLSLDecompiler::TexelFetch,
+ &GLSLDecompiler::ImageStore,
+
&GLSLDecompiler::Branch,
&GLSLDecompiler::PushFlowStack,
&GLSLDecompiler::PopFlowStack,
@@ -1756,6 +1856,14 @@ private:
return GetDeclarationWithSuffix(static_cast<u32>(sampler.GetIndex()), "sampler");
}
+ std::string GetImage(const Image& image) const {
+ return GetDeclarationWithSuffix(static_cast<u32>(image.GetIndex()), "image");
+ }
+
+ void EmitIfdefIsBuffer(const Sampler& sampler) {
+ code.AddLine("#ifdef SAMPLER_{}_IS_BUFFER", sampler.GetIndex());
+ }
+
std::string GetDeclarationWithSuffix(u32 index, const std::string& name) const {
return fmt::format("{}_{}_{}", name, index, suffix);
}
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index c1569e737..14d11c7fc 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -27,6 +27,7 @@ struct ShaderEntries;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using ProgramResult = std::pair<std::string, ShaderEntries>;
using SamplerEntry = VideoCommon::Shader::Sampler;
+using ImageEntry = VideoCommon::Shader::Image;
class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
public:
@@ -74,6 +75,7 @@ struct ShaderEntries {
std::vector<ConstBufferEntry> const_buffers;
std::vector<SamplerEntry> samplers;
std::vector<SamplerEntry> bindless_samplers;
+ std::vector<ImageEntry> images;
std::vector<GlobalMemoryEntry> global_memory_entries;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
std::size_t shader_length{};
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
index ee4a45ca2..10688397b 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
@@ -34,11 +34,11 @@ enum class PrecompiledEntryKind : u32 {
Dump,
};
-constexpr u32 NativeVersion = 1;
+constexpr u32 NativeVersion = 4;
// Making sure sizes don't change by accident
-static_assert(sizeof(BaseBindings) == 12);
-static_assert(sizeof(ShaderDiskCacheUsage) == 24);
+static_assert(sizeof(BaseBindings) == 16);
+static_assert(sizeof(ShaderDiskCacheUsage) == 40);
namespace {
@@ -332,11 +332,28 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn
static_cast<Tegra::Shader::TextureType>(type), is_array, is_shadow, is_bindless);
}
+ u32 images_count{};
+ if (!LoadObjectFromPrecompiled(images_count)) {
+ return {};
+ }
+ for (u32 i = 0; i < images_count; ++i) {
+ u64 offset{};
+ u64 index{};
+ u32 type{};
+ u8 is_bindless{};
+ if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) ||
+ !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_bindless)) {
+ return {};
+ }
+ entry.entries.images.emplace_back(
+ static_cast<std::size_t>(offset), static_cast<std::size_t>(index),
+ static_cast<Tegra::Shader::ImageType>(type), is_bindless != 0);
+ }
+
u32 global_memory_count{};
if (!LoadObjectFromPrecompiled(global_memory_count)) {
return {};
}
-
for (u32 i = 0; i < global_memory_count; ++i) {
u32 cbuf_index{};
u32 cbuf_offset{};
@@ -360,7 +377,6 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn
if (!LoadObjectFromPrecompiled(shader_length)) {
return {};
}
-
entry.entries.shader_length = static_cast<std::size_t>(shader_length);
return entry;
@@ -400,6 +416,18 @@ bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std:
}
}
+ if (!SaveObjectToPrecompiled(static_cast<u32>(entries.images.size()))) {
+ return false;
+ }
+ for (const auto& image : entries.images) {
+ if (!SaveObjectToPrecompiled(static_cast<u64>(image.GetOffset())) ||
+ !SaveObjectToPrecompiled(static_cast<u64>(image.GetIndex())) ||
+ !SaveObjectToPrecompiled(static_cast<u32>(image.GetType())) ||
+ !SaveObjectToPrecompiled(static_cast<u8>(image.IsBindless() ? 1 : 0))) {
+ return false;
+ }
+ }
+
if (!SaveObjectToPrecompiled(static_cast<u32>(entries.global_memory_entries.size()))) {
return false;
}
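Each image entry written by the new serialization code above amounts to the following record, stored after a u32 image count. The struct is illustrative only; the cache writes the fields one by one through SaveObjectToPrecompiled:

```cpp
#include "common/common_types.h"

// Per-image record in a decompiled precompiled-cache entry, mirroring the accessors
// used when saving.
struct ImageEntryRecord {
    u64 offset;     // image.GetOffset()
    u64 index;      // image.GetIndex()
    u32 type;       // Tegra::Shader::ImageType, stored as u32
    u8 is_bindless; // 1 if image.IsBindless(), otherwise 0
};
```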
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
index ecd72ba58..4f296dda6 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h
@@ -4,6 +4,7 @@
#pragma once
+#include <bitset>
#include <optional>
#include <string>
#include <tuple>
@@ -30,22 +31,26 @@ class IOFile;
namespace OpenGL {
-using ProgramCode = std::vector<u64>;
-using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-
struct ShaderDiskCacheUsage;
struct ShaderDiskCacheDump;
using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>;
-/// Allocated bindings used by an OpenGL shader program
+using ProgramCode = std::vector<u64>;
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+using TextureBufferUsage = std::bitset<64>;
+
+/// Allocated bindings used by an OpenGL shader program.
struct BaseBindings {
u32 cbuf{};
u32 gmem{};
u32 sampler{};
+ u32 image{};
bool operator==(const BaseBindings& rhs) const {
- return std::tie(cbuf, gmem, sampler) == std::tie(rhs.cbuf, rhs.gmem, rhs.sampler);
+ return std::tie(cbuf, gmem, sampler, image) ==
+ std::tie(rhs.cbuf, rhs.gmem, rhs.sampler, rhs.image);
}
bool operator!=(const BaseBindings& rhs) const {
@@ -53,15 +58,29 @@ struct BaseBindings {
}
};
-/// Describes how a shader is used
+/// Describes the different variants a single program can be compiled into.
+struct ProgramVariant {
+ BaseBindings base_bindings;
+ GLenum primitive_mode{};
+ TextureBufferUsage texture_buffer_usage{};
+
+ bool operator==(const ProgramVariant& rhs) const {
+ return std::tie(base_bindings, primitive_mode, texture_buffer_usage) ==
+ std::tie(rhs.base_bindings, rhs.primitive_mode, rhs.texture_buffer_usage);
+ }
+
+ bool operator!=(const ProgramVariant& rhs) const {
+ return !operator==(rhs);
+ }
+};
+
+/// Describes how a shader is used.
struct ShaderDiskCacheUsage {
u64 unique_identifier{};
- BaseBindings bindings;
- GLenum primitive{};
+ ProgramVariant variant;
bool operator==(const ShaderDiskCacheUsage& rhs) const {
- return std::tie(unique_identifier, bindings, primitive) ==
- std::tie(rhs.unique_identifier, rhs.bindings, rhs.primitive);
+ return std::tie(unique_identifier, variant) == std::tie(rhs.unique_identifier, rhs.variant);
}
bool operator!=(const ShaderDiskCacheUsage& rhs) const {
@@ -76,7 +95,19 @@ namespace std {
template <>
struct hash<OpenGL::BaseBindings> {
std::size_t operator()(const OpenGL::BaseBindings& bindings) const noexcept {
- return bindings.cbuf | bindings.gmem << 8 | bindings.sampler << 16;
+ return static_cast<std::size_t>(bindings.cbuf) ^
+ (static_cast<std::size_t>(bindings.gmem) << 8) ^
+ (static_cast<std::size_t>(bindings.sampler) << 16) ^
+ (static_cast<std::size_t>(bindings.image) << 24);
+ }
+};
+
+template <>
+struct hash<OpenGL::ProgramVariant> {
+ std::size_t operator()(const OpenGL::ProgramVariant& variant) const noexcept {
+ return std::hash<OpenGL::BaseBindings>()(variant.base_bindings) ^
+ std::hash<OpenGL::TextureBufferUsage>()(variant.texture_buffer_usage) ^
+ (static_cast<std::size_t>(variant.primitive_mode) << 6);
}
};
@@ -84,7 +115,7 @@ template <>
struct hash<OpenGL::ShaderDiskCacheUsage> {
std::size_t operator()(const OpenGL::ShaderDiskCacheUsage& usage) const noexcept {
return static_cast<std::size_t>(usage.unique_identifier) ^
- std::hash<OpenGL::BaseBindings>()(usage.bindings) ^ usage.primitive << 16;
+ std::hash<OpenGL::ProgramVariant>()(usage.variant);
}
};
@@ -275,26 +306,17 @@ private:
return LoadArrayFromPrecompiled(&object, 1);
}
- bool LoadObjectFromPrecompiled(bool& object) {
- u8 value;
- const bool read_ok = LoadArrayFromPrecompiled(&value, 1);
- if (!read_ok) {
- return false;
- }
-
- object = value != 0;
- return true;
- }
-
- // Core system
Core::System& system;
- // Stored transferable shaders
- std::map<u64, std::unordered_set<ShaderDiskCacheUsage>> transferable;
- // Stores whole precompiled cache which will be read from/saved to the precompiled cache file
+
+ // Stores the whole precompiled cache, which will be read from or saved to the
+ // precompiled cache file
FileSys::VectorVfsFile precompiled_cache_virtual_file;
// Stores the current offset of the precompiled cache file for IO purposes
std::size_t precompiled_cache_virtual_file_offset = 0;
+ // Stored transferable shaders
+ std::unordered_map<u64, std::unordered_set<ShaderDiskCacheUsage>> transferable;
+
// The cache has been loaded at boot
bool tried_to_load{};
};
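A short sketch of how the new key type is meant to be consumed: ProgramVariant must hash and compare consistently so the program maps in gl_shader_cache can be keyed by it. The values below are arbitrary, and glad is assumed as the GL loader, as elsewhere in the renderer:

```cpp
#include <unordered_map>

#include <glad/glad.h>

#include "video_core/renderer_opengl/gl_shader_disk_cache.h"

void ProgramVariantSketch() {
    OpenGL::ProgramVariant variant;
    variant.primitive_mode = GL_TRIANGLES;
    variant.texture_buffer_usage.set(0); // sampler 0 is backed by a texture buffer

    // The std::hash specializations above make ProgramVariant usable as an
    // unordered_map key, exactly like the program maps in gl_shader_cache.
    std::unordered_map<OpenGL::ProgramVariant, int> programs;
    programs.try_emplace(variant, 0);

    // A copy compares equal and hashes to the same bucket, so the lookup hits.
    const OpenGL::ProgramVariant same = variant;
    const bool hit = programs.find(same) != programs.end();
    (void)hit;
}
```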
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.cpp b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
index d0b14b3f6..35ba334e4 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.cpp
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.cpp
@@ -15,7 +15,8 @@ MICROPROFILE_DEFINE(OpenGL_StreamBuffer, "OpenGL", "Stream Buffer Orphaning",
namespace OpenGL {
-OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent)
+OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent,
+ bool use_persistent)
: buffer_size(size) {
gl_buffer.Create();
@@ -29,7 +30,7 @@ OGLStreamBuffer::OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool p
allocate_size *= 2;
}
- if (GLAD_GL_ARB_buffer_storage) {
+ if (use_persistent) {
persistent = true;
coherent = prefer_coherent;
const GLbitfield flags =
diff --git a/src/video_core/renderer_opengl/gl_stream_buffer.h b/src/video_core/renderer_opengl/gl_stream_buffer.h
index 3d18ecb4d..f8383cbd4 100644
--- a/src/video_core/renderer_opengl/gl_stream_buffer.h
+++ b/src/video_core/renderer_opengl/gl_stream_buffer.h
@@ -13,7 +13,8 @@ namespace OpenGL {
class OGLStreamBuffer : private NonCopyable {
public:
- explicit OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent = false);
+ explicit OGLStreamBuffer(GLsizeiptr size, bool vertex_data_usage, bool prefer_coherent = false,
+ bool use_persistent = true);
~OGLStreamBuffer();
GLuint GetHandle() const;
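To illustrate the new flag: callers can now opt out of persistent mapping even when ARB_buffer_storage is available. A minimal sketch, with an arbitrary size:

```cpp
#include <glad/glad.h>

#include "video_core/renderer_opengl/gl_stream_buffer.h"

void StreamBufferSketch() {
    constexpr GLsizeiptr size = 4 * 1024 * 1024;
    // Non-vertex usage, no preference for coherent mapping, persistent mapping disabled.
    OpenGL::OGLStreamBuffer stream_buffer(size, false, false, /*use_persistent=*/false);
    const GLuint handle = stream_buffer.GetHandle();
    (void)handle;
}
```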
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
new file mode 100644
index 000000000..08ae1a429
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -0,0 +1,614 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "common/scope_exit.h"
+#include "core/core.h"
+#include "video_core/morton.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_state.h"
+#include "video_core/renderer_opengl/gl_texture_cache.h"
+#include "video_core/renderer_opengl/utils.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/texture_cache.h"
+#include "video_core/textures/convert.h"
+#include "video_core/textures/texture.h"
+
+namespace OpenGL {
+
+using Tegra::Texture::SwizzleSource;
+using VideoCore::MortonSwizzleMode;
+
+using VideoCore::Surface::ComponentType;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::SurfaceCompression;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceType;
+
+MICROPROFILE_DEFINE(OpenGL_Texture_Upload, "OpenGL", "Texture Upload", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(OpenGL_Texture_Download, "OpenGL", "Texture Download", MP_RGB(128, 192, 128));
+
+namespace {
+
+struct FormatTuple {
+ GLint internal_format;
+ GLenum format;
+ GLenum type;
+ ComponentType component_type;
+ bool compressed;
+};
+
+constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm, false}, // ABGR8U
+ {GL_RGBA8, GL_RGBA, GL_BYTE, ComponentType::SNorm, false}, // ABGR8S
+ {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // ABGR8UI
+ {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, ComponentType::UNorm, false}, // B5G6R5U
+ {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, ComponentType::UNorm,
+ false}, // A2B10G10R10U
+ {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, ComponentType::UNorm, false}, // A1B5G5R5U
+ {GL_R8, GL_RED, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // R8U
+ {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, ComponentType::UInt, false}, // R8UI
+ {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, ComponentType::Float, false}, // RGBA16F
+ {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RGBA16U
+ {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RGBA16UI
+ {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, ComponentType::Float,
+ false}, // R11FG11FB10F
+ {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RGBA32UI
+ {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT1
+ {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT23
+ {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT45
+ {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm, true}, // DXN1
+ {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXN2UNORM
+ {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM
+ {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // BC7U
+ {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ true}, // BC6H_UF16
+ {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ true}, // BC6H_SF16
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4
+ {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8
+ {GL_RGBA32F, GL_RGBA, GL_FLOAT, ComponentType::Float, false}, // RGBA32F
+ {GL_RG32F, GL_RG, GL_FLOAT, ComponentType::Float, false}, // RG32F
+ {GL_R32F, GL_RED, GL_FLOAT, ComponentType::Float, false}, // R32F
+ {GL_R16F, GL_RED, GL_HALF_FLOAT, ComponentType::Float, false}, // R16F
+ {GL_R16, GL_RED, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // R16U
+ {GL_R16_SNORM, GL_RED, GL_SHORT, ComponentType::SNorm, false}, // R16S
+ {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // R16UI
+ {GL_R16I, GL_RED_INTEGER, GL_SHORT, ComponentType::SInt, false}, // R16I
+ {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, ComponentType::UNorm, false}, // RG16
+ {GL_RG16F, GL_RG, GL_HALF_FLOAT, ComponentType::Float, false}, // RG16F
+ {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, ComponentType::UInt, false}, // RG16UI
+ {GL_RG16I, GL_RG_INTEGER, GL_SHORT, ComponentType::SInt, false}, // RG16I
+ {GL_RG16_SNORM, GL_RG, GL_SHORT, ComponentType::SNorm, false}, // RG16S
+ {GL_RGB32F, GL_RGB, GL_FLOAT, ComponentType::Float, false}, // RGB32F
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, ComponentType::UNorm,
+ false}, // RGBA8_SRGB
+ {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // RG8U
+ {GL_RG8, GL_RG, GL_BYTE, ComponentType::SNorm, false}, // RG8S
+ {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // RG32UI
+ {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, ComponentType::UInt, false}, // R32UI
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4
+ {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // BGRA8_SRGB
+ // Compressed sRGB formats
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT1_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT23_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // DXT45_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // BC7U_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
+
+ // Depth formats
+ {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
+ {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, ComponentType::UNorm,
+ false}, // Z16
+
+ // DepthStencil formats
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
+ false}, // Z24S8
+ {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, ComponentType::UNorm,
+ false}, // S8Z24
+ {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV,
+ ComponentType::Float, false}, // Z32FS8
+}};
+
+const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
+ ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
+ const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]};
+ ASSERT(component_type == format.component_type);
+ return format;
+}
+
+GLenum GetTextureTarget(const SurfaceTarget& target) {
+ switch (target) {
+ case SurfaceTarget::TextureBuffer:
+ return GL_TEXTURE_BUFFER;
+ case SurfaceTarget::Texture1D:
+ return GL_TEXTURE_1D;
+ case SurfaceTarget::Texture2D:
+ return GL_TEXTURE_2D;
+ case SurfaceTarget::Texture3D:
+ return GL_TEXTURE_3D;
+ case SurfaceTarget::Texture1DArray:
+ return GL_TEXTURE_1D_ARRAY;
+ case SurfaceTarget::Texture2DArray:
+ return GL_TEXTURE_2D_ARRAY;
+ case SurfaceTarget::TextureCubemap:
+ return GL_TEXTURE_CUBE_MAP;
+ case SurfaceTarget::TextureCubeArray:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ }
+ UNREACHABLE();
+ return {};
+}
+
+GLint GetSwizzleSource(SwizzleSource source) {
+ switch (source) {
+ case SwizzleSource::Zero:
+ return GL_ZERO;
+ case SwizzleSource::R:
+ return GL_RED;
+ case SwizzleSource::G:
+ return GL_GREEN;
+ case SwizzleSource::B:
+ return GL_BLUE;
+ case SwizzleSource::A:
+ return GL_ALPHA;
+ case SwizzleSource::OneInt:
+ case SwizzleSource::OneFloat:
+ return GL_ONE;
+ }
+ UNREACHABLE();
+ return GL_NONE;
+}
+
+void ApplyTextureDefaults(const SurfaceParams& params, GLuint texture) {
+ glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTextureParameteri(texture, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTextureParameteri(texture, GL_TEXTURE_MAX_LEVEL, params.num_levels - 1);
+ if (params.num_levels == 1) {
+ glTextureParameterf(texture, GL_TEXTURE_LOD_BIAS, 1000.0f);
+ }
+}
+
+OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum internal_format,
+ OGLBuffer& texture_buffer) {
+ OGLTexture texture;
+ texture.Create(target);
+
+ switch (params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureStorage1D(texture.handle, params.emulated_levels, internal_format, params.width);
+ break;
+ case SurfaceTarget::TextureBuffer:
+ texture_buffer.Create();
+ glNamedBufferStorage(texture_buffer.handle, params.width * params.GetBytesPerPixel(),
+ nullptr, GL_DYNAMIC_STORAGE_BIT);
+ glTextureBuffer(texture.handle, internal_format, texture_buffer.handle);
+ break;
+ case SurfaceTarget::Texture2D:
+ case SurfaceTarget::TextureCubemap:
+ glTextureStorage2D(texture.handle, params.emulated_levels, internal_format, params.width,
+ params.height);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glTextureStorage3D(texture.handle, params.emulated_levels, internal_format, params.width,
+ params.height, params.depth);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ ApplyTextureDefaults(params, texture.handle);
+
+ return texture;
+}
+
+} // Anonymous namespace
+
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params)
+ : VideoCommon::SurfaceBase<View>(gpu_addr, params) {
+ const auto& tuple{GetFormatTuple(params.pixel_format, params.component_type)};
+ internal_format = tuple.internal_format;
+ format = tuple.format;
+ type = tuple.type;
+ is_compressed = tuple.compressed;
+ target = GetTextureTarget(params.target);
+ texture = CreateTexture(params, target, internal_format, texture_buffer);
+ DecorateSurfaceName();
+ main_view = CreateViewInner(
+ ViewParams(params.target, 0, params.is_layered ? params.depth : 1, 0, params.num_levels),
+ true);
+}
+
+CachedSurface::~CachedSurface() = default;
+
+void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
+ MICROPROFILE_SCOPE(OpenGL_Texture_Download);
+
+ SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
+
+ for (u32 level = 0; level < params.emulated_levels; ++level) {
+ glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
+ const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level);
+ if (is_compressed) {
+ glGetCompressedTextureImage(texture.handle, level,
+ static_cast<GLsizei>(params.GetHostMipmapSize(level)),
+ staging_buffer.data() + mip_offset);
+ } else {
+ glGetTextureImage(texture.handle, level, format, type,
+ static_cast<GLsizei>(params.GetHostMipmapSize(level)),
+ staging_buffer.data() + mip_offset);
+ }
+ }
+}
+
+void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
+ MICROPROFILE_SCOPE(OpenGL_Texture_Upload);
+ SCOPE_EXIT({ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); });
+ for (u32 level = 0; level < params.emulated_levels; ++level) {
+ UploadTextureMipmap(level, staging_buffer);
+ }
+}
+
+void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
+ glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
+
+ auto compression_type = params.GetCompressionType();
+
+ const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
+ ? params.GetConvertedMipmapOffset(level)
+ : params.GetHostMipmapLevelOffset(level);
+ const u8* buffer{staging_buffer.data() + mip_offset};
+ if (is_compressed) {
+ const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
+ switch (params.target) {
+ case SurfaceTarget::Texture2D:
+ glCompressedTextureSubImage2D(texture.handle, level, 0, 0,
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ internal_format, image_size, buffer);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glCompressedTextureSubImage3D(texture.handle, level, 0, 0, 0,
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ static_cast<GLsizei>(params.GetMipDepth(level)),
+ internal_format, image_size, buffer);
+ break;
+ case SurfaceTarget::TextureCubemap: {
+ const std::size_t layer_size{params.GetHostLayerSize(level)};
+ for (std::size_t face = 0; face < params.depth; ++face) {
+ glCompressedTextureSubImage3D(texture.handle, level, 0, 0, static_cast<GLint>(face),
+ static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)), 1,
+ internal_format, static_cast<GLsizei>(layer_size),
+ buffer);
+ buffer += layer_size;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ switch (params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureSubImage1D(texture.handle, level, 0, params.GetMipWidth(level), format, type,
+ buffer);
+ break;
+ case SurfaceTarget::TextureBuffer:
+ ASSERT(level == 0);
+ glNamedBufferSubData(texture_buffer.handle, 0,
+ params.GetMipWidth(level) * params.GetBytesPerPixel(), buffer);
+ break;
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2D:
+ glTextureSubImage2D(texture.handle, level, 0, 0, params.GetMipWidth(level),
+ params.GetMipHeight(level), format, type, buffer);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ glTextureSubImage3D(
+ texture.handle, level, 0, 0, 0, static_cast<GLsizei>(params.GetMipWidth(level)),
+ static_cast<GLsizei>(params.GetMipHeight(level)),
+ static_cast<GLsizei>(params.GetMipDepth(level)), format, type, buffer);
+ break;
+ case SurfaceTarget::TextureCubemap:
+ for (std::size_t face = 0; face < params.depth; ++face) {
+ glTextureSubImage3D(texture.handle, level, 0, 0, static_cast<GLint>(face),
+ params.GetMipWidth(level), params.GetMipHeight(level), 1,
+ format, type, buffer);
+ buffer += params.GetHostLayerSize(level);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void CachedSurface::DecorateSurfaceName() {
+ LabelGLObject(GL_TEXTURE, texture.handle, GetGpuAddr(), params.TargetName());
+}
+
+void CachedSurfaceView::DecorateViewName(GPUVAddr gpu_addr, std::string prefix) {
+ LabelGLObject(GL_TEXTURE, texture_view.handle, gpu_addr, prefix);
+}
+
+View CachedSurface::CreateView(const ViewParams& view_key) {
+ return CreateViewInner(view_key, false);
+}
+
+View CachedSurface::CreateViewInner(const ViewParams& view_key, const bool is_proxy) {
+ auto view = std::make_shared<CachedSurfaceView>(*this, view_key, is_proxy);
+ views[view_key] = view;
+ if (!is_proxy)
+ view->DecorateViewName(gpu_addr, params.TargetName() + "V:" + std::to_string(view_count++));
+ return view;
+}
+
+CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& params,
+ const bool is_proxy)
+ : VideoCommon::ViewBase(params), surface{surface}, is_proxy{is_proxy} {
+ target = GetTextureTarget(params.target);
+ if (!is_proxy) {
+ texture_view = CreateTextureView();
+ }
+ swizzle = EncodeSwizzle(SwizzleSource::R, SwizzleSource::G, SwizzleSource::B, SwizzleSource::A);
+}
+
+CachedSurfaceView::~CachedSurfaceView() = default;
+
+void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
+ ASSERT(params.num_layers == 1 && params.num_levels == 1);
+
+ const auto& owner_params = surface.GetSurfaceParams();
+
+ switch (owner_params.target) {
+ case SurfaceTarget::Texture1D:
+ glFramebufferTexture1D(target, attachment, surface.GetTarget(), surface.GetTexture(),
+ params.base_level);
+ break;
+ case SurfaceTarget::Texture2D:
+ glFramebufferTexture2D(target, attachment, surface.GetTarget(), surface.GetTexture(),
+ params.base_level);
+ break;
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::TextureCubeArray:
+ glFramebufferTextureLayer(target, attachment, surface.GetTexture(), params.base_level,
+ params.base_layer);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void CachedSurfaceView::ApplySwizzle(SwizzleSource x_source, SwizzleSource y_source,
+ SwizzleSource z_source, SwizzleSource w_source) {
+ u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
+ if (new_swizzle == swizzle)
+ return;
+ swizzle = new_swizzle;
+ const std::array<GLint, 4> gl_swizzle = {GetSwizzleSource(x_source), GetSwizzleSource(y_source),
+ GetSwizzleSource(z_source),
+ GetSwizzleSource(w_source)};
+ const GLuint handle = GetTexture();
+ glTextureParameteriv(handle, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle.data());
+}
+
+OGLTextureView CachedSurfaceView::CreateTextureView() const {
+ const auto& owner_params = surface.GetSurfaceParams();
+ OGLTextureView texture_view;
+ texture_view.Create();
+
+ const GLuint handle{texture_view.handle};
+ const FormatTuple& tuple{
+ GetFormatTuple(owner_params.pixel_format, owner_params.component_type)};
+
+ glTextureView(handle, target, surface.texture.handle, tuple.internal_format, params.base_level,
+ params.num_levels, params.base_layer, params.num_layers);
+
+ ApplyTextureDefaults(owner_params, handle);
+
+ return texture_view;
+}
+
+TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
+ VideoCore::RasterizerInterface& rasterizer,
+ const Device& device)
+ : TextureCacheBase{system, rasterizer} {
+ src_framebuffer.Create();
+ dst_framebuffer.Create();
+}
+
+TextureCacheOpenGL::~TextureCacheOpenGL() = default;
+
+Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
+ return std::make_shared<CachedSurface>(gpu_addr, params);
+}
+
+void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
+ const VideoCommon::CopyParams& copy_params) {
+ const auto& src_params = src_surface->GetSurfaceParams();
+ const auto& dst_params = dst_surface->GetSurfaceParams();
+ if (src_params.type != dst_params.type) {
+ // A fallback is needed
+ return;
+ }
+ const auto src_handle = src_surface->GetTexture();
+ const auto src_target = src_surface->GetTarget();
+ const auto dst_handle = dst_surface->GetTexture();
+ const auto dst_target = dst_surface->GetTarget();
+ glCopyImageSubData(src_handle, src_target, copy_params.source_level, copy_params.source_x,
+ copy_params.source_y, copy_params.source_z, dst_handle, dst_target,
+ copy_params.dest_level, copy_params.dest_x, copy_params.dest_y,
+ copy_params.dest_z, copy_params.width, copy_params.height,
+ copy_params.depth);
+}
+
+void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
+ const auto& src_params{src_view->GetSurfaceParams()};
+ const auto& dst_params{dst_view->GetSurfaceParams()};
+
+ OpenGLState prev_state{OpenGLState::GetCurState()};
+ SCOPE_EXIT({ prev_state.Apply(); });
+
+ OpenGLState state;
+ state.draw.read_framebuffer = src_framebuffer.handle;
+ state.draw.draw_framebuffer = dst_framebuffer.handle;
+ state.Apply();
+
+ u32 buffers{};
+
+ UNIMPLEMENTED_IF(src_params.target == SurfaceTarget::Texture3D);
+ UNIMPLEMENTED_IF(dst_params.target == SurfaceTarget::Texture3D);
+
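+ // Attach the views that match the surface type and detach the unrelated attachment
+ // points on the read and draw framebuffers, then select the matching blit mask.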
+ if (src_params.type == SurfaceType::ColorTexture) {
+ src_view->Attach(GL_COLOR_ATTACHMENT0, GL_READ_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
+ 0);
+
+ dst_view->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
+ 0);
+
+ buffers = GL_COLOR_BUFFER_BIT;
+ } else if (src_params.type == SurfaceType::Depth) {
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ src_view->Attach(GL_DEPTH_ATTACHMENT, GL_READ_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ dst_view->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+
+ buffers = GL_DEPTH_BUFFER_BIT;
+ } else if (src_params.type == SurfaceType::DepthStencil) {
+ glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ src_view->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_READ_FRAMEBUFFER);
+
+ glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+ dst_view->Attach(GL_DEPTH_STENCIL_ATTACHMENT, GL_DRAW_FRAMEBUFFER);
+
+ buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
+ }
+
+ const Common::Rectangle<u32>& src_rect = copy_config.src_rect;
+ const Common::Rectangle<u32>& dst_rect = copy_config.dst_rect;
+ const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
+
+ glBlitFramebuffer(src_rect.left, src_rect.top, src_rect.right, src_rect.bottom, dst_rect.left,
+ dst_rect.top, dst_rect.right, dst_rect.bottom, buffers,
+ is_linear && (buffers == GL_COLOR_BUFFER_BIT) ? GL_LINEAR : GL_NEAREST);
+}
+
+void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface) {
+ const auto& src_params = src_surface->GetSurfaceParams();
+ const auto& dst_params = dst_surface->GetSurfaceParams();
+ UNIMPLEMENTED_IF(src_params.num_levels > 1 || dst_params.num_levels > 1);
+
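+ // Round-trip through a pixel buffer object: pack the source texture into the PBO, then
+ // unpack it into the destination texture using the destination's format description.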
+ const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
+ const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
+
+ const std::size_t source_size = src_surface->GetHostSizeInBytes();
+ const std::size_t dest_size = dst_surface->GetHostSizeInBytes();
+
+ const std::size_t buffer_size = std::max(source_size, dest_size);
+
+ GLuint copy_pbo_handle = FetchPBO(buffer_size);
+
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
+
+ if (source_format.compressed) {
+ glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
+ nullptr);
+ } else {
+ glGetTextureImage(src_surface->GetTexture(), 0, source_format.format, source_format.type,
+ static_cast<GLsizei>(source_size), nullptr);
+ }
+ glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, copy_pbo_handle);
+
+ const GLsizei width = static_cast<GLsizei>(dst_params.width);
+ const GLsizei height = static_cast<GLsizei>(dst_params.height);
+ const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
+ if (dest_format.compressed) {
+ LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
+ UNREACHABLE();
+ } else {
+ switch (dst_params.target) {
+ case SurfaceTarget::Texture1D:
+ glTextureSubImage1D(dst_surface->GetTexture(), 0, 0, width, dest_format.format,
+ dest_format.type, nullptr);
+ break;
+ case SurfaceTarget::Texture2D:
+ glTextureSubImage2D(dst_surface->GetTexture(), 0, 0, 0, width, height,
+ dest_format.format, dest_format.type, nullptr);
+ break;
+ case SurfaceTarget::Texture3D:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ case SurfaceTarget::TextureCubemap:
+ glTextureSubImage3D(dst_surface->GetTexture(), 0, 0, 0, 0, width, height, depth,
+ dest_format.format, dest_format.type, nullptr);
+ break;
+ default:
+ LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
+ static_cast<u32>(dst_params.target));
+ UNREACHABLE();
+ }
+ }
+ glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
+ glTextureBarrier();
+}
+
+GLuint TextureCacheOpenGL::FetchPBO(std::size_t buffer_size) {
+ ASSERT_OR_EXECUTE(buffer_size > 0, { return 0; });
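+ // PBOs are cached per power-of-two size class, so round the requested size up to the
+ // next power of two before looking one up.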
+ const u32 l2 = Common::Log2Ceil64(static_cast<u64>(buffer_size));
+ OGLBuffer& cp = copy_pbo_cache[l2];
+ if (cp.handle == 0) {
+ const std::size_t ceil_size = 1ULL << l2;
+ cp.Create();
+ cp.MakeStreamCopy(ceil_size);
+ }
+ return cp.handle;
+}
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
new file mode 100644
index 000000000..ff6ab6988
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -0,0 +1,143 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <functional>
+#include <memory>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include <glad/glad.h>
+
+#include "common/common_types.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/renderer_opengl/gl_device.h"
+#include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/texture_cache/texture_cache.h"
+
+namespace OpenGL {
+
+using VideoCommon::SurfaceParams;
+using VideoCommon::ViewParams;
+
+class CachedSurfaceView;
+class CachedSurface;
+class TextureCacheOpenGL;
+
+using Surface = std::shared_ptr<CachedSurface>;
+using View = std::shared_ptr<CachedSurfaceView>;
+using TextureCacheBase = VideoCommon::TextureCache<Surface, View>;
+
+class CachedSurface final : public VideoCommon::SurfaceBase<View> {
+ friend CachedSurfaceView;
+
+public:
+ explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
+ ~CachedSurface();
+
+ void UploadTexture(const std::vector<u8>& staging_buffer) override;
+ void DownloadTexture(std::vector<u8>& staging_buffer) override;
+
+ GLenum GetTarget() const {
+ return target;
+ }
+
+ GLuint GetTexture() const {
+ return texture.handle;
+ }
+
+protected:
+ void DecorateSurfaceName();
+
+ View CreateView(const ViewParams& view_key) override;
+ View CreateViewInner(const ViewParams& view_key, bool is_proxy);
+
+private:
+ void UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer);
+
+ GLenum internal_format{};
+ GLenum format{};
+ GLenum type{};
+ bool is_compressed{};
+ GLenum target{};
+ u32 view_count{};
+
+ OGLTexture texture;
+ OGLBuffer texture_buffer;
+};
+
+class CachedSurfaceView final : public VideoCommon::ViewBase {
+public:
+ explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
+ ~CachedSurfaceView();
+
+ /// Attaches this texture view to the framebuffer currently bound to the given target
+ void Attach(GLenum attachment, GLenum target) const;
+
+ GLuint GetTexture() const {
+ if (is_proxy) {
+ return surface.GetTexture();
+ }
+ return texture_view.handle;
+ }
+
+ const SurfaceParams& GetSurfaceParams() const {
+ return surface.GetSurfaceParams();
+ }
+
+ void ApplySwizzle(Tegra::Texture::SwizzleSource x_source,
+ Tegra::Texture::SwizzleSource y_source,
+ Tegra::Texture::SwizzleSource z_source,
+ Tegra::Texture::SwizzleSource w_source);
+
+ void DecorateViewName(GPUVAddr gpu_addr, std::string prefix);
+
+private:
+ u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source,
+ Tegra::Texture::SwizzleSource y_source,
+ Tegra::Texture::SwizzleSource z_source,
+ Tegra::Texture::SwizzleSource w_source) const {
+ return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
+ (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
+ }
+
+ OGLTextureView CreateTextureView() const;
+
+ CachedSurface& surface;
+ GLenum target{};
+
+ OGLTextureView texture_view;
+ u32 swizzle;
+ bool is_proxy;
+};
+
+class TextureCacheOpenGL final : public TextureCacheBase {
+public:
+ explicit TextureCacheOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ const Device& device);
+ ~TextureCacheOpenGL();
+
+protected:
+ Surface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) override;
+
+ void ImageCopy(Surface& src_surface, Surface& dst_surface,
+ const VideoCommon::CopyParams& copy_params) override;
+
+ void ImageBlit(View& src_view, View& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) override;
+
+ void BufferCopy(Surface& src_surface, Surface& dst_surface) override;
+
+private:
+ GLuint FetchPBO(std::size_t buffer_size);
+
+ OGLFramebuffer src_framebuffer;
+ OGLFramebuffer dst_framebuffer;
+ std::unordered_map<u32, OGLBuffer> copy_pbo_cache;
+};
+
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index aafd6f31b..b142521ec 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -471,7 +471,6 @@ static void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum
}
}
-/// Initialize the renderer
bool RendererOpenGL::Init() {
Core::Frontend::ScopeAcquireWindowContext acquire_context{render_window};
diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp
index f23fc9f9d..68c36988d 100644
--- a/src/video_core/renderer_opengl/utils.cpp
+++ b/src/video_core/renderer_opengl/utils.cpp
@@ -5,8 +5,10 @@
#include <string>
#include <fmt/format.h>
#include <glad/glad.h>
+
#include "common/assert.h"
#include "common/common_types.h"
+#include "common/scope_exit.h"
#include "video_core/renderer_opengl/utils.h"
namespace OpenGL {
@@ -63,4 +65,4 @@ void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_vie
glObjectLabel(identifier, handle, -1, static_cast<const GLchar*>(object_label.c_str()));
}
-} // namespace OpenGL
\ No newline at end of file
+} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h
index b3e9fc499..4a752f3b4 100644
--- a/src/video_core/renderer_opengl/utils.h
+++ b/src/video_core/renderer_opengl/utils.h
@@ -32,4 +32,4 @@ private:
void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info = {});
-} // namespace OpenGL
\ No newline at end of file
+} // namespace OpenGL
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 33ad9764a..97ce214b1 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -935,6 +935,11 @@ private:
return {};
}
+ Id ImageStore(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id Branch(Operation operation) {
const auto target = std::get_if<ImmediateNode>(&*operation[0]);
UNIMPLEMENTED_IF(!target);
@@ -1326,6 +1331,8 @@ private:
&SPIRVDecompiler::TextureQueryLod,
&SPIRVDecompiler::TexelFetch,
+ &SPIRVDecompiler::ImageStore,
+
&SPIRVDecompiler::Branch,
&SPIRVDecompiler::PushFlowStack,
&SPIRVDecompiler::PopFlowStack,
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index a0554c97e..2c9ff28f2 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -169,6 +169,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
{OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
{OpCode::Type::Memory, &ShaderIR::DecodeMemory},
{OpCode::Type::Texture, &ShaderIR::DecodeTexture},
+ {OpCode::Type::Image, &ShaderIR::DecodeImage},
{OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate},
{OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate},
{OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate},
diff --git a/src/video_core/shader/decode/decode_integer_set.cpp b/src/video_core/shader/decode/decode_integer_set.cpp
deleted file mode 100644
index e69de29bb..000000000
--- a/src/video_core/shader/decode/decode_integer_set.cpp
+++ /dev/null
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
new file mode 100644
index 000000000..24f022cc0
--- /dev/null
+++ b/src/video_core/shader/decode/image.cpp
@@ -0,0 +1,120 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <vector>
+#include <fmt/format.h>
+
+#include "common/assert.h"
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/node_helper.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+
+namespace {
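+// Returns the number of coordinate registers an image operand consumes; array types take an
+// extra register for the layer index.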
+std::size_t GetImageTypeNumCoordinates(Tegra::Shader::ImageType image_type) {
+ switch (image_type) {
+ case Tegra::Shader::ImageType::Texture1D:
+ case Tegra::Shader::ImageType::TextureBuffer:
+ return 1;
+ case Tegra::Shader::ImageType::Texture1DArray:
+ case Tegra::Shader::ImageType::Texture2D:
+ return 2;
+ case Tegra::Shader::ImageType::Texture2DArray:
+ case Tegra::Shader::ImageType::Texture3D:
+ return 3;
+ }
+ UNREACHABLE();
+ return 1;
+}
+} // Anonymous namespace
+
+u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
+ const Instruction instr = {program_code[pc]};
+ const auto opcode = OpCode::Decode(instr);
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::SUST: {
+ UNIMPLEMENTED_IF(instr.sust.mode != Tegra::Shader::SurfaceDataMode::P);
+ UNIMPLEMENTED_IF(instr.sust.image_type == Tegra::Shader::ImageType::TextureBuffer);
+ UNIMPLEMENTED_IF(instr.sust.out_of_bounds_store != Tegra::Shader::OutOfBoundsStore::Ignore);
+ UNIMPLEMENTED_IF(instr.sust.component_mask_selector != 0xf); // Ensure we have an RGBA store
+
+ std::vector<Node> values;
+ constexpr std::size_t hardcoded_size{4};
+ for (std::size_t i = 0; i < hardcoded_size; ++i) {
+ values.push_back(GetRegister(instr.gpr0.Value() + i));
+ }
+
+ std::vector<Node> coords;
+ const std::size_t num_coords{GetImageTypeNumCoordinates(instr.sust.image_type)};
+ for (std::size_t i = 0; i < num_coords; ++i) {
+ coords.push_back(GetRegister(instr.gpr8.Value() + i));
+ }
+
+ const auto type{instr.sust.image_type};
+ const auto& image{instr.sust.is_immediate ? GetImage(instr.image, type)
+ : GetBindlessImage(instr.gpr39, type)};
+ MetaImage meta{image, values};
+ const Node store{Operation(OperationCode::ImageStore, meta, std::move(coords))};
+ bb.push_back(store);
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName());
+ }
+
+ return pc;
+}
+
+const Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
+ const auto offset{static_cast<std::size_t>(image.index.Value())};
+
+ // If this image has already been used, return the existing mapping.
+ const auto itr{std::find_if(used_images.begin(), used_images.end(),
+ [=](const Image& entry) { return entry.GetOffset() == offset; })};
+ if (itr != used_images.end()) {
+ ASSERT(itr->GetType() == type);
+ return *itr;
+ }
+
+ // Otherwise create a new mapping for this image.
+ const std::size_t next_index{used_images.size()};
+ const Image entry{offset, next_index, type};
+ return *used_images.emplace(entry).first;
+}
+
+const Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg,
+ Tegra::Shader::ImageType type) {
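+ // Track the image handle back to the constant buffer slot it was read from and build a
+ // 64-bit key from the (cbuf index, cbuf offset) pair.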
+ const Node image_register{GetRegister(reg)};
+ const Node base_image{
+ TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))};
+ const auto cbuf{std::get_if<CbufNode>(&*base_image)};
+ const auto cbuf_offset_imm{std::get_if<ImmediateNode>(&*cbuf->GetOffset())};
+ const auto cbuf_offset{cbuf_offset_imm->GetValue()};
+ const auto cbuf_index{cbuf->GetIndex()};
+ const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)};
+
+ // If this image has already been used, return the existing mapping.
+ const auto itr{std::find_if(used_images.begin(), used_images.end(),
+ [=](const Image& entry) { return entry.GetOffset() == cbuf_key; })};
+ if (itr != used_images.end()) {
+ ASSERT(itr->GetType() == type);
+ return *itr;
+ }
+
+ // Otherwise create a new mapping for this image.
+ const std::size_t next_index{used_images.size()};
+ const Image entry{cbuf_index, cbuf_offset, next_index, type};
+ return *used_images.emplace(entry).first;
+}
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 4a356dbd4..cb480be9b 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -245,6 +245,18 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
}
break;
}
+ case OpCode::Id::TLD: {
+ UNIMPLEMENTED_IF_MSG(instr.tld.aoffi, "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld.ms, "MS is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld.cl, "CL is not implemented");
+
+ if (instr.tld.nodep_flag) {
+ LOG_WARNING(HW_GPU, "TLD.NODEP implementation is incomplete");
+ }
+
+ WriteTexInstructionFloat(bb, instr, GetTldCode(instr));
+ break;
+ }
case OpCode::Id::TLDS: {
const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
const bool is_array{instr.tlds.IsArrayTexture()};
@@ -575,6 +587,39 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
return values;
}
+Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
+ const auto texture_type{instr.tld.texture_type};
+ const bool is_array{instr.tld.is_array};
+ const bool lod_enabled{instr.tld.GetTextureProcessMode() == TextureProcessMode::LL};
+ const std::size_t coord_count{GetCoordCount(texture_type)};
+
+ u64 gpr8_cursor{instr.gpr8.Value()};
+ const Node array_register{is_array ? GetRegister(gpr8_cursor++) : nullptr};
+
+ std::vector<Node> coords;
+ coords.reserve(coord_count);
+ for (std::size_t i = 0; i < coord_count; ++i) {
+ coords.push_back(GetRegister(gpr8_cursor++));
+ }
+
+ u64 gpr20_cursor{instr.gpr20.Value()};
+ // const Node bindless_register{is_bindless ? GetRegister(gpr20_cursor++) : nullptr};
+ const Node lod{lod_enabled ? GetRegister(gpr20_cursor++) : Immediate(0u)};
+ // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
+ // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
+
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto coords_copy = coords;
+ MetaTexture meta{sampler, array_register, {}, {}, {}, lod, {}, element};
+ values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
+ }
+
+ return values;
+}
+
Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
const std::size_t type_coord_count = GetCoordCount(texture_type);
const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3cfb911bb..0ac83fcf0 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -146,6 +146,8 @@ enum class OperationCode {
TextureQueryLod, /// (MetaTexture, float[N] coords) -> float4
TexelFetch, /// (MetaTexture, int[N], int) -> float4
+ ImageStore, /// (MetaImage, float[N] coords) -> void
+
Branch, /// (uint branch_target) -> void
PushFlowStack, /// (uint branch_target) -> void
PopFlowStack, /// () -> void
@@ -263,6 +265,48 @@ private:
bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
};
+class Image {
+public:
+ explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type)
+ : offset{offset}, index{index}, type{type}, is_bindless{false} {}
+
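+ // Bindless images are identified by the constant buffer (index, offset) pair packed
+ // into the offset field.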
+ explicit Image(u32 cbuf_index, u32 cbuf_offset, std::size_t index,
+ Tegra::Shader::ImageType type)
+ : offset{(static_cast<u64>(cbuf_index) << 32) | cbuf_offset}, index{index}, type{type},
+ is_bindless{true} {}
+
+ explicit Image(std::size_t offset, std::size_t index, Tegra::Shader::ImageType type,
+ bool is_bindless)
+ : offset{offset}, index{index}, type{type}, is_bindless{is_bindless} {}
+
+ std::size_t GetOffset() const {
+ return offset;
+ }
+
+ std::size_t GetIndex() const {
+ return index;
+ }
+
+ Tegra::Shader::ImageType GetType() const {
+ return type;
+ }
+
+ bool IsBindless() const {
+ return is_bindless;
+ }
+
+ bool operator<(const Image& rhs) const {
+ return std::tie(offset, index, type, is_bindless) <
+ std::tie(rhs.offset, rhs.index, rhs.type, rhs.is_bindless);
+ }
+
+private:
+ std::size_t offset{};
+ std::size_t index{};
+ Tegra::Shader::ImageType type{};
+ bool is_bindless{};
+};
+
struct GlobalMemoryBase {
u32 cbuf_index{};
u32 cbuf_offset{};
@@ -289,8 +333,14 @@ struct MetaTexture {
u32 element{};
};
+struct MetaImage {
+ const Image& image;
+ std::vector<Node> values;
+};
+
/// Parameters that modify an operation but are not part of any particular operand
-using Meta = std::variant<MetaArithmetic, MetaTexture, MetaStackClass, Tegra::Shader::HalfType>;
+using Meta =
+ std::variant<MetaArithmetic, MetaTexture, MetaImage, MetaStackClass, Tegra::Shader::HalfType>;
/// Holds any kind of operation that can be done in the IR
class OperationNode final {
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index edcf2288e..e22548208 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -104,6 +104,10 @@ public:
return used_samplers;
}
+ const std::set<Image>& GetImages() const {
+ return used_images;
+ }
+
const std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances>& GetClipDistances()
const {
return used_clip_distances;
@@ -154,6 +158,7 @@ private:
u32 DecodeConversion(NodeBlock& bb, u32 pc);
u32 DecodeMemory(NodeBlock& bb, u32 pc);
u32 DecodeTexture(NodeBlock& bb, u32 pc);
+ u32 DecodeImage(NodeBlock& bb, u32 pc);
u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc);
@@ -254,6 +259,12 @@ private:
Tegra::Shader::TextureType type, bool is_array,
bool is_shadow);
+ /// Accesses an image.
+ const Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
+
+ /// Accesses a bindless image.
+ const Image& GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type);
+
/// Extracts a sequence of bits from a node
Node BitfieldExtract(Node value, u32 offset, u32 bits);
@@ -277,6 +288,8 @@ private:
Node4 GetTld4Code(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
bool depth_compare, bool is_array, bool is_aoffi);
+ Node4 GetTldCode(Tegra::Shader::Instruction instr);
+
Node4 GetTldsCode(Tegra::Shader::Instruction instr, Tegra::Shader::TextureType texture_type,
bool is_array);
@@ -327,6 +340,7 @@ private:
std::set<Tegra::Shader::Attribute::Index> used_output_attributes;
std::map<u32, ConstBuffer> used_cbufs;
std::set<Sampler> used_samplers;
+ std::set<Image> used_images;
std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{};
std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory;
bool uses_physical_attributes{}; // Shader uses AL2P or physical attribute read/writes
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 6384fa8d2..c50f6354d 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -12,6 +12,8 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
switch (texture_type) {
case Tegra::Texture::TextureType::Texture1D:
return SurfaceTarget::Texture1D;
+ case Tegra::Texture::TextureType::Texture1DBuffer:
+ return SurfaceTarget::TextureBuffer;
case Tegra::Texture::TextureType::Texture2D:
case Tegra::Texture::TextureType::Texture2DNoMipmap:
return SurfaceTarget::Texture2D;
@@ -35,6 +37,7 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
bool SurfaceTargetIsLayered(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
+ case SurfaceTarget::TextureBuffer:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture3D:
return false;
@@ -53,6 +56,7 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) {
bool SurfaceTargetIsArray(SurfaceTarget target) {
switch (target) {
case SurfaceTarget::Texture1D:
+ case SurfaceTarget::TextureBuffer:
case SurfaceTarget::Texture2D:
case SurfaceTarget::Texture3D:
case SurfaceTarget::TextureCubemap:
@@ -304,8 +308,8 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format,
return PixelFormat::Z32F;
case Tegra::Texture::TextureFormat::Z16:
return PixelFormat::Z16;
- case Tegra::Texture::TextureFormat::Z24S8:
- return PixelFormat::Z24S8;
+ case Tegra::Texture::TextureFormat::S8Z24:
+ return PixelFormat::S8Z24;
case Tegra::Texture::TextureFormat::ZF32_X24S8:
return PixelFormat::Z32FS8;
case Tegra::Texture::TextureFormat::DXT1:
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index b783e4b27..83f31c12c 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -114,6 +114,7 @@ enum class SurfaceType {
enum class SurfaceTarget {
Texture1D,
+ TextureBuffer,
Texture2D,
Texture3D,
Texture1DArray,
@@ -122,71 +123,71 @@ enum class SurfaceTarget {
TextureCubeArray,
};
-constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
- 1, // ABGR8U
- 1, // ABGR8S
- 1, // ABGR8UI
- 1, // B5G6R5U
- 1, // A2B10G10R10U
- 1, // A1B5G5R5U
- 1, // R8U
- 1, // R8UI
- 1, // RGBA16F
- 1, // RGBA16U
- 1, // RGBA16UI
- 1, // R11FG11FB10F
- 1, // RGBA32UI
- 4, // DXT1
- 4, // DXT23
- 4, // DXT45
- 4, // DXN1
- 4, // DXN2UNORM
- 4, // DXN2SNORM
- 4, // BC7U
- 4, // BC6H_UF16
- 4, // BC6H_SF16
- 4, // ASTC_2D_4X4
- 1, // BGRA8
- 1, // RGBA32F
- 1, // RG32F
- 1, // R32F
- 1, // R16F
- 1, // R16U
- 1, // R16S
- 1, // R16UI
- 1, // R16I
- 1, // RG16
- 1, // RG16F
- 1, // RG16UI
- 1, // RG16I
- 1, // RG16S
- 1, // RGB32F
- 1, // RGBA8_SRGB
- 1, // RG8U
- 1, // RG8S
- 1, // RG32UI
- 1, // R32UI
- 4, // ASTC_2D_8X8
- 4, // ASTC_2D_8X5
- 4, // ASTC_2D_5X4
- 1, // BGRA8_SRGB
- 4, // DXT1_SRGB
- 4, // DXT23_SRGB
- 4, // DXT45_SRGB
- 4, // BC7U_SRGB
- 4, // ASTC_2D_4X4_SRGB
- 4, // ASTC_2D_8X8_SRGB
- 4, // ASTC_2D_8X5_SRGB
- 4, // ASTC_2D_5X4_SRGB
- 4, // ASTC_2D_5X5
- 4, // ASTC_2D_5X5_SRGB
- 4, // ASTC_2D_10X8
- 4, // ASTC_2D_10X8_SRGB
- 1, // Z32F
- 1, // Z16
- 1, // Z24S8
- 1, // S8Z24
- 1, // Z32FS8
+constexpr std::array<u32, MaxPixelFormat> compression_factor_shift_table = {{
+ 0, // ABGR8U
+ 0, // ABGR8S
+ 0, // ABGR8UI
+ 0, // B5G6R5U
+ 0, // A2B10G10R10U
+ 0, // A1B5G5R5U
+ 0, // R8U
+ 0, // R8UI
+ 0, // RGBA16F
+ 0, // RGBA16U
+ 0, // RGBA16UI
+ 0, // R11FG11FB10F
+ 0, // RGBA32UI
+ 2, // DXT1
+ 2, // DXT23
+ 2, // DXT45
+ 2, // DXN1
+ 2, // DXN2UNORM
+ 2, // DXN2SNORM
+ 2, // BC7U
+ 2, // BC6H_UF16
+ 2, // BC6H_SF16
+ 2, // ASTC_2D_4X4
+ 0, // BGRA8
+ 0, // RGBA32F
+ 0, // RG32F
+ 0, // R32F
+ 0, // R16F
+ 0, // R16U
+ 0, // R16S
+ 0, // R16UI
+ 0, // R16I
+ 0, // RG16
+ 0, // RG16F
+ 0, // RG16UI
+ 0, // RG16I
+ 0, // RG16S
+ 0, // RGB32F
+ 0, // RGBA8_SRGB
+ 0, // RG8U
+ 0, // RG8S
+ 0, // RG32UI
+ 0, // R32UI
+ 2, // ASTC_2D_8X8
+ 2, // ASTC_2D_8X5
+ 2, // ASTC_2D_5X4
+ 0, // BGRA8_SRGB
+ 2, // DXT1_SRGB
+ 2, // DXT23_SRGB
+ 2, // DXT45_SRGB
+ 2, // BC7U_SRGB
+ 2, // ASTC_2D_4X4_SRGB
+ 2, // ASTC_2D_8X8_SRGB
+ 2, // ASTC_2D_8X5_SRGB
+ 2, // ASTC_2D_5X4_SRGB
+ 2, // ASTC_2D_5X5
+ 2, // ASTC_2D_5X5_SRGB
+ 2, // ASTC_2D_10X8
+ 2, // ASTC_2D_10X8_SRGB
+ 0, // Z32F
+ 0, // Z16
+ 0, // Z24S8
+ 0, // S8Z24
+ 0, // Z32FS8
}};
/**
@@ -195,12 +196,14 @@ constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
* compressed image. This is used for maintaining proper surface sizes for compressed
* texture formats.
*/
-static constexpr u32 GetCompressionFactor(PixelFormat format) {
- if (format == PixelFormat::Invalid)
- return 0;
+inline constexpr u32 GetCompressionFactorShift(PixelFormat format) {
+ DEBUG_ASSERT(format != PixelFormat::Invalid);
+ DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_factor_shift_table.size());
+ return compression_factor_shift_table[static_cast<std::size_t>(format)];
+}
- ASSERT(static_cast<std::size_t>(format) < compression_factor_table.size());
- return compression_factor_table[static_cast<std::size_t>(format)];
+inline constexpr u32 GetCompressionFactor(PixelFormat format) {
+ return 1U << GetCompressionFactorShift(format);
}
constexpr std::array<u32, MaxPixelFormat> block_width_table = {{
@@ -436,6 +439,88 @@ static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
return GetFormatBpp(pixel_format) / CHAR_BIT;
}
+enum class SurfaceCompression {
+ None, // Not compressed
+ Compressed, // Texture is compressed
+ Converted, // Texture is converted before upload or after download
+ Rearranged, // Texture is swizzled before upload or after download
+};
+
+constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table = {{
+ SurfaceCompression::None, // ABGR8U
+ SurfaceCompression::None, // ABGR8S
+ SurfaceCompression::None, // ABGR8UI
+ SurfaceCompression::None, // B5G6R5U
+ SurfaceCompression::None, // A2B10G10R10U
+ SurfaceCompression::None, // A1B5G5R5U
+ SurfaceCompression::None, // R8U
+ SurfaceCompression::None, // R8UI
+ SurfaceCompression::None, // RGBA16F
+ SurfaceCompression::None, // RGBA16U
+ SurfaceCompression::None, // RGBA16UI
+ SurfaceCompression::None, // R11FG11FB10F
+ SurfaceCompression::None, // RGBA32UI
+ SurfaceCompression::Compressed, // DXT1
+ SurfaceCompression::Compressed, // DXT23
+ SurfaceCompression::Compressed, // DXT45
+ SurfaceCompression::Compressed, // DXN1
+ SurfaceCompression::Compressed, // DXN2UNORM
+ SurfaceCompression::Compressed, // DXN2SNORM
+ SurfaceCompression::Compressed, // BC7U
+ SurfaceCompression::Compressed, // BC6H_UF16
+ SurfaceCompression::Compressed, // BC6H_SF16
+ SurfaceCompression::Converted, // ASTC_2D_4X4
+ SurfaceCompression::None, // BGRA8
+ SurfaceCompression::None, // RGBA32F
+ SurfaceCompression::None, // RG32F
+ SurfaceCompression::None, // R32F
+ SurfaceCompression::None, // R16F
+ SurfaceCompression::None, // R16U
+ SurfaceCompression::None, // R16S
+ SurfaceCompression::None, // R16UI
+ SurfaceCompression::None, // R16I
+ SurfaceCompression::None, // RG16
+ SurfaceCompression::None, // RG16F
+ SurfaceCompression::None, // RG16UI
+ SurfaceCompression::None, // RG16I
+ SurfaceCompression::None, // RG16S
+ SurfaceCompression::None, // RGB32F
+ SurfaceCompression::None, // RGBA8_SRGB
+ SurfaceCompression::None, // RG8U
+ SurfaceCompression::None, // RG8S
+ SurfaceCompression::None, // RG32UI
+ SurfaceCompression::None, // R32UI
+ SurfaceCompression::Converted, // ASTC_2D_8X8
+ SurfaceCompression::Converted, // ASTC_2D_8X5
+ SurfaceCompression::Converted, // ASTC_2D_5X4
+ SurfaceCompression::None, // BGRA8_SRGB
+ SurfaceCompression::Compressed, // DXT1_SRGB
+ SurfaceCompression::Compressed, // DXT23_SRGB
+ SurfaceCompression::Compressed, // DXT45_SRGB
+ SurfaceCompression::Compressed, // BC7U_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_4X4_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_8X8_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_8X5_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_5X4_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_5X5
+ SurfaceCompression::Converted, // ASTC_2D_5X5_SRGB
+ SurfaceCompression::Converted, // ASTC_2D_10X8
+ SurfaceCompression::Converted, // ASTC_2D_10X8_SRGB
+ SurfaceCompression::None, // Z32F
+ SurfaceCompression::None, // Z16
+ SurfaceCompression::None, // Z24S8
+ SurfaceCompression::Rearranged, // S8Z24
+ SurfaceCompression::None, // Z32FS8
+}};
+
+constexpr SurfaceCompression GetFormatCompressionType(PixelFormat format) {
+ if (format == PixelFormat::Invalid) {
+ return SurfaceCompression::None;
+ }
+ DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_type_table.size());
+ return compression_type_table[static_cast<std::size_t>(format)];
+}
+
SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_type);
bool SurfaceTargetIsLayered(SurfaceTarget target);
diff --git a/src/video_core/texture_cache.cpp b/src/video_core/texture_cache.cpp
deleted file mode 100644
index e96eba7cc..000000000
--- a/src/video_core/texture_cache.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "common/alignment.h"
-#include "common/assert.h"
-#include "common/cityhash.h"
-#include "common/common_types.h"
-#include "core/core.h"
-#include "video_core/surface.h"
-#include "video_core/texture_cache.h"
-#include "video_core/textures/decoders.h"
-#include "video_core/textures/texture.h"
-
-namespace VideoCommon {
-
-using VideoCore::Surface::SurfaceTarget;
-
-using VideoCore::Surface::ComponentTypeFromDepthFormat;
-using VideoCore::Surface::ComponentTypeFromRenderTarget;
-using VideoCore::Surface::ComponentTypeFromTexture;
-using VideoCore::Surface::PixelFormatFromDepthFormat;
-using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
-using VideoCore::Surface::PixelFormatFromTextureFormat;
-using VideoCore::Surface::SurfaceTargetFromTextureType;
-
-constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) {
- return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile);
-}
-
-SurfaceParams SurfaceParams::CreateForTexture(Core::System& system,
- const Tegra::Texture::FullTextureInfo& config) {
- SurfaceParams params;
- params.is_tiled = config.tic.IsTiled();
- params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
- params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
- params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
- params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
- params.pixel_format =
- PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(), false);
- params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
- params.type = GetFormatType(params.pixel_format);
- params.target = SurfaceTargetFromTextureType(config.tic.texture_type);
- params.width = Common::AlignUp(config.tic.Width(), GetCompressionFactor(params.pixel_format));
- params.height = Common::AlignUp(config.tic.Height(), GetCompressionFactor(params.pixel_format));
- params.depth = config.tic.Depth();
- if (params.target == SurfaceTarget::TextureCubemap ||
- params.target == SurfaceTarget::TextureCubeArray) {
- params.depth *= 6;
- }
- params.pitch = params.is_tiled ? 0 : config.tic.Pitch();
- params.unaligned_height = config.tic.Height();
- params.num_levels = config.tic.max_mip_level + 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForDepthBuffer(
- Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
- SurfaceParams params;
- params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << std::min(block_width, 5U);
- params.block_height = 1 << std::min(block_height, 5U);
- params.block_depth = 1 << std::min(block_depth, 5U);
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromDepthFormat(format);
- params.component_type = ComponentTypeFromDepthFormat(format);
- params.type = GetFormatType(params.pixel_format);
- params.width = zeta_width;
- params.height = zeta_height;
- params.unaligned_height = zeta_height;
- params.target = SurfaceTarget::Texture2D;
- params.depth = 1;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
- const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
- SurfaceParams params;
- params.is_tiled =
- config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
- params.block_width = 1 << config.memory_layout.block_width;
- params.block_height = 1 << config.memory_layout.block_height;
- params.block_depth = 1 << config.memory_layout.block_depth;
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- if (params.is_tiled) {
- params.width = config.width;
- } else {
- const u32 bpp = GetFormatBpp(params.pixel_format) / CHAR_BIT;
- params.pitch = config.width;
- params.width = params.pitch / bpp;
- }
- params.height = config.height;
- params.depth = 1;
- params.unaligned_height = config.height;
- params.target = SurfaceTarget::Texture2D;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-SurfaceParams SurfaceParams::CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- SurfaceParams params{};
- params.is_tiled = !config.linear;
- params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0,
- params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 32U) : 0,
- params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 32U) : 0,
- params.tile_width_spacing = 1;
- params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
- params.component_type = ComponentTypeFromRenderTarget(config.format);
- params.type = GetFormatType(params.pixel_format);
- params.width = config.width;
- params.height = config.height;
- params.unaligned_height = config.height;
- // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters
- params.target = SurfaceTarget::Texture2D;
- params.depth = 1;
- params.num_levels = 1;
-
- params.CalculateCachedValues();
- return params;
-}
-
-u32 SurfaceParams::GetMipWidth(u32 level) const {
- return std::max(1U, width >> level);
-}
-
-u32 SurfaceParams::GetMipHeight(u32 level) const {
- return std::max(1U, height >> level);
-}
-
-u32 SurfaceParams::GetMipDepth(u32 level) const {
- return IsLayered() ? depth : std::max(1U, depth >> level);
-}
-
-bool SurfaceParams::IsLayered() const {
- switch (target) {
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubeArray:
- case SurfaceTarget::TextureCubemap:
- return true;
- default:
- return false;
- }
-}
-
-u32 SurfaceParams::GetMipBlockHeight(u32 level) const {
- // Auto block resizing algorithm from:
- // https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
- if (level == 0) {
- return block_height;
- }
- const u32 height{GetMipHeight(level)};
- const u32 default_block_height{GetDefaultBlockHeight(pixel_format)};
- const u32 blocks_in_y{(height + default_block_height - 1) / default_block_height};
- u32 block_height = 16;
- while (block_height > 1 && blocks_in_y <= block_height * 4) {
- block_height >>= 1;
- }
- return block_height;
-}
-
-u32 SurfaceParams::GetMipBlockDepth(u32 level) const {
- if (level == 0)
- return block_depth;
- if (target != SurfaceTarget::Texture3D)
- return 1;
-
- const u32 depth{GetMipDepth(level)};
- u32 block_depth = 32;
- while (block_depth > 1 && depth * 2 <= block_depth) {
- block_depth >>= 1;
- }
- if (block_depth == 32 && GetMipBlockHeight(level) >= 4) {
- return 16;
- }
- return block_depth;
-}
-
-std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetInnerMipmapMemorySize(i, false, IsLayered(), false);
- }
- return offset;
-}
-
-std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
- std::size_t offset = 0;
- for (u32 i = 0; i < level; i++) {
- offset += GetInnerMipmapMemorySize(i, true, false, false);
- }
- return offset;
-}
-
-std::size_t SurfaceParams::GetGuestLayerSize() const {
- return GetInnerMemorySize(false, true, false);
-}
-
-std::size_t SurfaceParams::GetHostLayerSize(u32 level) const {
- return GetInnerMipmapMemorySize(level, true, IsLayered(), false);
-}
-
-bool SurfaceParams::IsFamiliar(const SurfaceParams& view_params) const {
- if (std::tie(is_tiled, tile_width_spacing, pixel_format, component_type, type) !=
- std::tie(view_params.is_tiled, view_params.tile_width_spacing, view_params.pixel_format,
- view_params.component_type, view_params.type)) {
- return false;
- }
-
- const SurfaceTarget view_target{view_params.target};
- if (view_target == target) {
- return true;
- }
-
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D:
- return false;
- case SurfaceTarget::Texture1DArray:
- return view_target == SurfaceTarget::Texture1D;
- case SurfaceTarget::Texture2DArray:
- return view_target == SurfaceTarget::Texture2D;
- case SurfaceTarget::TextureCubemap:
- return view_target == SurfaceTarget::Texture2D ||
- view_target == SurfaceTarget::Texture2DArray;
- case SurfaceTarget::TextureCubeArray:
- return view_target == SurfaceTarget::Texture2D ||
- view_target == SurfaceTarget::Texture2DArray ||
- view_target == SurfaceTarget::TextureCubemap;
- default:
- UNIMPLEMENTED_MSG("Unimplemented texture family={}", static_cast<u32>(target));
- return false;
- }
-}
-
-bool SurfaceParams::IsPixelFormatZeta() const {
- return pixel_format >= VideoCore::Surface::PixelFormat::MaxColorFormat &&
- pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
-}
-
-void SurfaceParams::CalculateCachedValues() {
- guest_size_in_bytes = GetInnerMemorySize(false, false, false);
-
- // ASTC is uncompressed in software, in emulated as RGBA8
- if (IsPixelFormatASTC(pixel_format)) {
- host_size_in_bytes = width * height * depth * 4;
- } else {
- host_size_in_bytes = GetInnerMemorySize(true, false, false);
- }
-
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D:
- num_layers = 1;
- break;
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::TextureCubeArray:
- num_layers = depth;
- break;
- default:
- UNREACHABLE();
- }
-}
-
-std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool layer_only,
- bool uncompressed) const {
- const bool tiled{as_host_size ? false : is_tiled};
- const u32 tile_x{GetDefaultBlockWidth(pixel_format)};
- const u32 tile_y{GetDefaultBlockHeight(pixel_format)};
- const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), tile_x)};
- const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), tile_y)};
- const u32 depth{layer_only ? 1U : GetMipDepth(level)};
- return Tegra::Texture::CalculateSize(tiled, GetBytesPerPixel(pixel_format), width, height,
- depth, GetMipBlockHeight(level), GetMipBlockDepth(level));
-}
-
-std::size_t SurfaceParams::GetInnerMemorySize(bool as_host_size, bool layer_only,
- bool uncompressed) const {
- std::size_t size = 0;
- for (u32 level = 0; level < num_levels; ++level) {
- size += GetInnerMipmapMemorySize(level, as_host_size, layer_only, uncompressed);
- }
- if (!as_host_size && is_tiled) {
- size = Common::AlignUp(size, Tegra::Texture::GetGOBSize() * block_height * block_depth);
- }
- return size;
-}
-
-std::map<u64, std::pair<u32, u32>> SurfaceParams::CreateViewOffsetMap() const {
- std::map<u64, std::pair<u32, u32>> view_offset_map;
- switch (target) {
- case SurfaceTarget::Texture1D:
- case SurfaceTarget::Texture2D:
- case SurfaceTarget::Texture3D: {
- constexpr u32 layer = 0;
- for (u32 level = 0; level < num_levels; ++level) {
- const std::size_t offset{GetGuestMipmapLevelOffset(level)};
- view_offset_map.insert({offset, {layer, level}});
- }
- break;
- }
- case SurfaceTarget::Texture1DArray:
- case SurfaceTarget::Texture2DArray:
- case SurfaceTarget::TextureCubemap:
- case SurfaceTarget::TextureCubeArray: {
- const std::size_t layer_size{GetGuestLayerSize()};
- for (u32 level = 0; level < num_levels; ++level) {
- const std::size_t level_offset{GetGuestMipmapLevelOffset(level)};
- for (u32 layer = 0; layer < num_layers; ++layer) {
- const auto layer_offset{static_cast<std::size_t>(layer_size * layer)};
- const std::size_t offset{level_offset + layer_offset};
- view_offset_map.insert({offset, {layer, level}});
- }
- }
- break;
- }
- default:
- UNIMPLEMENTED_MSG("Unimplemented surface target {}", static_cast<u32>(target));
- }
- return view_offset_map;
-}
-
-bool SurfaceParams::IsViewValid(const SurfaceParams& view_params, u32 layer, u32 level) const {
- return IsDimensionValid(view_params, level) && IsDepthValid(view_params, level) &&
- IsInBounds(view_params, layer, level);
-}
-
-bool SurfaceParams::IsDimensionValid(const SurfaceParams& view_params, u32 level) const {
- return view_params.width == GetMipWidth(level) && view_params.height == GetMipHeight(level);
-}
-
-bool SurfaceParams::IsDepthValid(const SurfaceParams& view_params, u32 level) const {
- if (view_params.target != SurfaceTarget::Texture3D) {
- return true;
- }
- return view_params.depth == GetMipDepth(level);
-}
-
-bool SurfaceParams::IsInBounds(const SurfaceParams& view_params, u32 layer, u32 level) const {
- return layer + view_params.num_layers <= num_layers &&
- level + view_params.num_levels <= num_levels;
-}
-
-std::size_t HasheableSurfaceParams::Hash() const {
- return static_cast<std::size_t>(
- Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
-}
-
-bool HasheableSurfaceParams::operator==(const HasheableSurfaceParams& rhs) const {
- return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width,
- height, depth, pitch, unaligned_height, num_levels, pixel_format,
- component_type, type, target) ==
- std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth,
- rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch,
- rhs.unaligned_height, rhs.num_levels, rhs.pixel_format, rhs.component_type,
- rhs.type, rhs.target);
-}
-
-std::size_t ViewKey::Hash() const {
- return static_cast<std::size_t>(
- Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
-}
-
-bool ViewKey::operator==(const ViewKey& rhs) const {
- return std::tie(base_layer, num_layers, base_level, num_levels) ==
- std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels);
-}
-
-} // namespace VideoCommon
diff --git a/src/video_core/texture_cache.h b/src/video_core/texture_cache.h
deleted file mode 100644
index 041551691..000000000
--- a/src/video_core/texture_cache.h
+++ /dev/null
@@ -1,586 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <list>
-#include <memory>
-#include <set>
-#include <tuple>
-#include <type_traits>
-#include <unordered_map>
-
-#include <boost/icl/interval_map.hpp>
-#include <boost/range/iterator_range.hpp>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "core/memory.h"
-#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
-#include "video_core/rasterizer_interface.h"
-#include "video_core/surface.h"
-
-namespace Core {
-class System;
-}
-
-namespace Tegra::Texture {
-struct FullTextureInfo;
-}
-
-namespace VideoCore {
-class RasterizerInterface;
-}
-
-namespace VideoCommon {
-
-class HasheableSurfaceParams {
-public:
- std::size_t Hash() const;
-
- bool operator==(const HasheableSurfaceParams& rhs) const;
-
-protected:
- // Avoid creation outside of a managed environment.
- HasheableSurfaceParams() = default;
-
- bool is_tiled;
- u32 block_width;
- u32 block_height;
- u32 block_depth;
- u32 tile_width_spacing;
- u32 width;
- u32 height;
- u32 depth;
- u32 pitch;
- u32 unaligned_height;
- u32 num_levels;
- VideoCore::Surface::PixelFormat pixel_format;
- VideoCore::Surface::ComponentType component_type;
- VideoCore::Surface::SurfaceType type;
- VideoCore::Surface::SurfaceTarget target;
-};
-
-class SurfaceParams final : public HasheableSurfaceParams {
-public:
- /// Creates SurfaceCachedParams from a texture configuration.
- static SurfaceParams CreateForTexture(Core::System& system,
- const Tegra::Texture::FullTextureInfo& config);
-
- /// Creates SurfaceCachedParams for a depth buffer configuration.
- static SurfaceParams CreateForDepthBuffer(
- Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
- u32 block_width, u32 block_height, u32 block_depth,
- Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
-
- /// Creates SurfaceCachedParams from a framebuffer configuration.
- static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
-
- /// Creates SurfaceCachedParams from a Fermi2D surface configuration.
- static SurfaceParams CreateForFermiCopySurface(
- const Tegra::Engines::Fermi2D::Regs::Surface& config);
-
- bool IsTiled() const {
- return is_tiled;
- }
-
- u32 GetBlockWidth() const {
- return block_width;
- }
-
- u32 GetTileWidthSpacing() const {
- return tile_width_spacing;
- }
-
- u32 GetWidth() const {
- return width;
- }
-
- u32 GetHeight() const {
- return height;
- }
-
- u32 GetDepth() const {
- return depth;
- }
-
- u32 GetPitch() const {
- return pitch;
- }
-
- u32 GetNumLevels() const {
- return num_levels;
- }
-
- VideoCore::Surface::PixelFormat GetPixelFormat() const {
- return pixel_format;
- }
-
- VideoCore::Surface::ComponentType GetComponentType() const {
- return component_type;
- }
-
- VideoCore::Surface::SurfaceTarget GetTarget() const {
- return target;
- }
-
- VideoCore::Surface::SurfaceType GetType() const {
- return type;
- }
-
- std::size_t GetGuestSizeInBytes() const {
- return guest_size_in_bytes;
- }
-
- std::size_t GetHostSizeInBytes() const {
- return host_size_in_bytes;
- }
-
- u32 GetNumLayers() const {
- return num_layers;
- }
-
- /// Returns the width of a given mipmap level.
- u32 GetMipWidth(u32 level) const;
-
- /// Returns the height of a given mipmap level.
- u32 GetMipHeight(u32 level) const;
-
- /// Returns the depth of a given mipmap level.
- u32 GetMipDepth(u32 level) const;
-
- /// Returns true if these parameters are from a layered surface.
- bool IsLayered() const;
-
- /// Returns the block height of a given mipmap level.
- u32 GetMipBlockHeight(u32 level) const;
-
- /// Returns the block depth of a given mipmap level.
- u32 GetMipBlockDepth(u32 level) const;
-
- /// Returns the offset in bytes in guest memory of a given mipmap level.
- std::size_t GetGuestMipmapLevelOffset(u32 level) const;
-
- /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
- std::size_t GetHostMipmapLevelOffset(u32 level) const;
-
- /// Returns the size of a layer in bytes in guest memory.
- std::size_t GetGuestLayerSize() const;
-
- /// Returns the size of a layer in bytes in host memory for a given mipmap level.
- std::size_t GetHostLayerSize(u32 level) const;
-
- /// Returns true if another surface can be familiar with this. This is a loosely defined term
- /// that reflects the possibility of these two surface parameters potentially being part of a
- /// bigger superset.
- bool IsFamiliar(const SurfaceParams& view_params) const;
-
- /// Returns true if the pixel format is a depth and/or stencil format.
- bool IsPixelFormatZeta() const;
-
- /// Creates a map that redirects an address difference to a layer and mipmap level.
- std::map<u64, std::pair<u32, u32>> CreateViewOffsetMap() const;
-
- /// Returns true if the passed surface view parameters is equal or a valid subset of this.
- bool IsViewValid(const SurfaceParams& view_params, u32 layer, u32 level) const;
-
-private:
- /// Calculates values that can be deduced from HasheableSurfaceParams.
- void CalculateCachedValues();
-
- /// Returns the size of a given mipmap level.
- std::size_t GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool layer_only,
- bool uncompressed) const;
-
- /// Returns the size of all mipmap levels and aligns as needed.
- std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const;
-
- /// Returns true if the passed view width and height match the size of this params in a given
- /// mipmap level.
- bool IsDimensionValid(const SurfaceParams& view_params, u32 level) const;
-
- /// Returns true if the passed view depth match the size of this params in a given mipmap level.
- bool IsDepthValid(const SurfaceParams& view_params, u32 level) const;
-
- /// Returns true if the passed view layers and mipmap levels are in bounds.
- bool IsInBounds(const SurfaceParams& view_params, u32 layer, u32 level) const;
-
- std::size_t guest_size_in_bytes;
- std::size_t host_size_in_bytes;
- u32 num_layers;
-};
-
-struct ViewKey {
- std::size_t Hash() const;
-
- bool operator==(const ViewKey& rhs) const;
-
- u32 base_layer{};
- u32 num_layers{};
- u32 base_level{};
- u32 num_levels{};
-};
-
-} // namespace VideoCommon
-
-namespace std {
-
-template <>
-struct hash<VideoCommon::SurfaceParams> {
- std::size_t operator()(const VideoCommon::SurfaceParams& k) const noexcept {
- return k.Hash();
- }
-};
-
-template <>
-struct hash<VideoCommon::ViewKey> {
- std::size_t operator()(const VideoCommon::ViewKey& k) const noexcept {
- return k.Hash();
- }
-};
-
-} // namespace std
-
-namespace VideoCommon {
-
-template <typename TView, typename TExecutionContext>
-class SurfaceBase {
- static_assert(std::is_trivially_copyable_v<TExecutionContext>);
-
-public:
- virtual void LoadBuffer() = 0;
-
- virtual TExecutionContext FlushBuffer(TExecutionContext exctx) = 0;
-
- virtual TExecutionContext UploadTexture(TExecutionContext exctx) = 0;
-
- TView* TryGetView(VAddr view_addr, const SurfaceParams& view_params) {
- if (view_addr < cpu_addr || !params.IsFamiliar(view_params)) {
- // It can't be a view if it's in a prior address.
- return {};
- }
-
- const auto relative_offset{static_cast<u64>(view_addr - cpu_addr)};
- const auto it{view_offset_map.find(relative_offset)};
- if (it == view_offset_map.end()) {
- // Couldn't find an aligned view.
- return {};
- }
- const auto [layer, level] = it->second;
-
- if (!params.IsViewValid(view_params, layer, level)) {
- return {};
- }
-
- return GetView(layer, view_params.GetNumLayers(), level, view_params.GetNumLevels());
- }
-
- VAddr GetCpuAddr() const {
- ASSERT(is_registered);
- return cpu_addr;
- }
-
- u8* GetHostPtr() const {
- ASSERT(is_registered);
- return host_ptr;
- }
-
- CacheAddr GetCacheAddr() const {
- ASSERT(is_registered);
- return cache_addr;
- }
-
- std::size_t GetSizeInBytes() const {
- return params.GetGuestSizeInBytes();
- }
-
- void MarkAsModified(bool is_modified_) {
- is_modified = is_modified_;
- }
-
- const SurfaceParams& GetSurfaceParams() const {
- return params;
- }
-
- TView* GetView(VAddr view_addr, const SurfaceParams& view_params) {
- TView* view{TryGetView(view_addr, view_params)};
- ASSERT(view != nullptr);
- return view;
- }
-
- void Register(VAddr cpu_addr_, u8* host_ptr_) {
- ASSERT(!is_registered);
- is_registered = true;
- cpu_addr = cpu_addr_;
- host_ptr = host_ptr_;
- cache_addr = ToCacheAddr(host_ptr_);
- }
-
- void Register(VAddr cpu_addr_) {
- Register(cpu_addr_, Memory::GetPointer(cpu_addr_));
- }
-
- void Unregister() {
- ASSERT(is_registered);
- is_registered = false;
- }
-
- bool IsRegistered() const {
- return is_registered;
- }
-
-protected:
- explicit SurfaceBase(const SurfaceParams& params)
- : params{params}, view_offset_map{params.CreateViewOffsetMap()} {}
-
- ~SurfaceBase() = default;
-
- virtual std::unique_ptr<TView> CreateView(const ViewKey& view_key) = 0;
-
- bool IsModified() const {
- return is_modified;
- }
-
- const SurfaceParams params;
-
-private:
- TView* GetView(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels) {
- const ViewKey key{base_layer, num_layers, base_level, num_levels};
- const auto [entry, is_cache_miss] = views.try_emplace(key);
- auto& view{entry->second};
- if (is_cache_miss) {
- view = CreateView(key);
- }
- return view.get();
- }
-
- const std::map<u64, std::pair<u32, u32>> view_offset_map;
-
- VAddr cpu_addr{};
- u8* host_ptr{};
- CacheAddr cache_addr{};
- bool is_modified{};
- bool is_registered{};
- std::unordered_map<ViewKey, std::unique_ptr<TView>> views;
-};
-
-template <typename TSurface, typename TView, typename TExecutionContext>
-class TextureCache {
- static_assert(std::is_trivially_copyable_v<TExecutionContext>);
- using ResultType = std::tuple<TView*, TExecutionContext>;
- using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface*>>;
- using IntervalType = typename IntervalMap::interval_type;
-
-public:
- void InvalidateRegion(CacheAddr addr, std::size_t size) {
- for (TSurface* surface : GetSurfacesInRegion(addr, size)) {
- if (!surface->IsRegistered()) {
- // Skip duplicates
- continue;
- }
- Unregister(surface);
- }
- }
-
- ResultType GetTextureSurface(TExecutionContext exctx,
- const Tegra::Texture::FullTextureInfo& config) {
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(config.tic.Address())};
- if (!cpu_addr) {
- return {{}, exctx};
- }
- const auto params{SurfaceParams::CreateForTexture(system, config)};
- return GetSurfaceView(exctx, *cpu_addr, params, true);
- }
-
- ResultType GetDepthBufferSurface(TExecutionContext exctx, bool preserve_contents) {
- const auto& regs{system.GPU().Maxwell3D().regs};
- if (!regs.zeta.Address() || !regs.zeta_enable) {
- return {{}, exctx};
- }
-
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(regs.zeta.Address())};
- if (!cpu_addr) {
- return {{}, exctx};
- }
-
- const auto depth_params{SurfaceParams::CreateForDepthBuffer(
- system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
- regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
- regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
- return GetSurfaceView(exctx, *cpu_addr, depth_params, preserve_contents);
- }
-
- ResultType GetColorBufferSurface(TExecutionContext exctx, std::size_t index,
- bool preserve_contents) {
- ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
-
- const auto& regs{system.GPU().Maxwell3D().regs};
- if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 ||
- regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
- return {{}, exctx};
- }
-
- auto& memory_manager{system.GPU().MemoryManager()};
- const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
- const auto cpu_addr{memory_manager.GpuToCpuAddress(
- config.Address() + config.base_layer * config.layer_stride * sizeof(u32))};
- if (!cpu_addr) {
- return {{}, exctx};
- }
-
- return GetSurfaceView(exctx, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
- preserve_contents);
- }
-
- ResultType GetFermiSurface(TExecutionContext exctx,
- const Tegra::Engines::Fermi2D::Regs::Surface& config) {
- const auto cpu_addr{system.GPU().MemoryManager().GpuToCpuAddress(config.Address())};
- ASSERT(cpu_addr);
- return GetSurfaceView(exctx, *cpu_addr, SurfaceParams::CreateForFermiCopySurface(config),
- true);
- }
-
- TSurface* TryFindFramebufferSurface(const u8* host_ptr) const {
- const auto it{registered_surfaces.find(ToCacheAddr(host_ptr))};
- return it != registered_surfaces.end() ? *it->second.begin() : nullptr;
- }
-
-protected:
- TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
- : system{system}, rasterizer{rasterizer} {}
-
- ~TextureCache() = default;
-
- virtual ResultType TryFastGetSurfaceView(TExecutionContext exctx, VAddr cpu_addr, u8* host_ptr,
- const SurfaceParams& params, bool preserve_contents,
- const std::vector<TSurface*>& overlaps) = 0;
-
- virtual std::unique_ptr<TSurface> CreateSurface(const SurfaceParams& params) = 0;
-
- void Register(TSurface* surface, VAddr cpu_addr, u8* host_ptr) {
- surface->Register(cpu_addr, host_ptr);
- registered_surfaces.add({GetSurfaceInterval(surface), {surface}});
- rasterizer.UpdatePagesCachedCount(surface->GetCpuAddr(), surface->GetSizeInBytes(), 1);
- }
-
- void Unregister(TSurface* surface) {
- registered_surfaces.subtract({GetSurfaceInterval(surface), {surface}});
- rasterizer.UpdatePagesCachedCount(surface->GetCpuAddr(), surface->GetSizeInBytes(), -1);
- surface->Unregister();
- }
-
- TSurface* GetUncachedSurface(const SurfaceParams& params) {
- if (TSurface* surface = TryGetReservedSurface(params); surface)
- return surface;
- // No reserved surface available, create a new one and reserve it
- auto new_surface{CreateSurface(params)};
- TSurface* surface{new_surface.get()};
- ReserveSurface(params, std::move(new_surface));
- return surface;
- }
-
- Core::System& system;
-
-private:
- ResultType GetSurfaceView(TExecutionContext exctx, VAddr cpu_addr, const SurfaceParams& params,
- bool preserve_contents) {
- const auto host_ptr{Memory::GetPointer(cpu_addr)};
- const auto cache_addr{ToCacheAddr(host_ptr)};
- const auto overlaps{GetSurfacesInRegion(cache_addr, params.GetGuestSizeInBytes())};
- if (overlaps.empty()) {
- return LoadSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents);
- }
-
- if (overlaps.size() == 1) {
- if (TView* view = overlaps[0]->TryGetView(cpu_addr, params); view)
- return {view, exctx};
- }
-
- TView* fast_view;
- std::tie(fast_view, exctx) =
- TryFastGetSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents, overlaps);
-
- for (TSurface* surface : overlaps) {
- if (!fast_view) {
- // Flush even when we don't care about the contents, to preserve memory not written
- // by the new surface.
- exctx = surface->FlushBuffer(exctx);
- }
- Unregister(surface);
- }
-
- if (fast_view) {
- return {fast_view, exctx};
- }
-
- return LoadSurfaceView(exctx, cpu_addr, host_ptr, params, preserve_contents);
- }
-
- ResultType LoadSurfaceView(TExecutionContext exctx, VAddr cpu_addr, u8* host_ptr,
- const SurfaceParams& params, bool preserve_contents) {
- TSurface* new_surface{GetUncachedSurface(params)};
- Register(new_surface, cpu_addr, host_ptr);
- if (preserve_contents) {
- exctx = LoadSurface(exctx, new_surface);
- }
- return {new_surface->GetView(cpu_addr, params), exctx};
- }
-
- TExecutionContext LoadSurface(TExecutionContext exctx, TSurface* surface) {
- surface->LoadBuffer();
- exctx = surface->UploadTexture(exctx);
- surface->MarkAsModified(false);
- return exctx;
- }
-
- std::vector<TSurface*> GetSurfacesInRegion(CacheAddr cache_addr, std::size_t size) const {
- if (size == 0) {
- return {};
- }
- const IntervalType interval{cache_addr, cache_addr + size};
-
- std::vector<TSurface*> surfaces;
- for (auto& pair : boost::make_iterator_range(registered_surfaces.equal_range(interval))) {
- surfaces.push_back(*pair.second.begin());
- }
- return surfaces;
- }
-
- void ReserveSurface(const SurfaceParams& params, std::unique_ptr<TSurface> surface) {
- surface_reserve[params].push_back(std::move(surface));
- }
-
- TSurface* TryGetReservedSurface(const SurfaceParams& params) {
- auto search{surface_reserve.find(params)};
- if (search == surface_reserve.end()) {
- return {};
- }
- for (auto& surface : search->second) {
- if (!surface->IsRegistered()) {
- return surface.get();
- }
- }
- return {};
- }
-
- IntervalType GetSurfaceInterval(TSurface* surface) const {
- return IntervalType::right_open(surface->GetCacheAddr(),
- surface->GetCacheAddr() + surface->GetSizeInBytes());
- }
-
- VideoCore::RasterizerInterface& rasterizer;
-
- IntervalMap registered_surfaces;
-
- /// The surface reserve is a "backup" cache, this is where we put unique surfaces that have
- /// previously been used. This is to prevent surfaces from being constantly created and
- /// destroyed when used with different surface parameters.
- std::unordered_map<SurfaceParams, std::list<std::unique_ptr<TSurface>>> surface_reserve;
-};
-
-} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/copy_params.h b/src/video_core/texture_cache/copy_params.h
new file mode 100644
index 000000000..9c21a0649
--- /dev/null
+++ b/src/video_core/texture_cache/copy_params.h
@@ -0,0 +1,36 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCommon {
+
+struct CopyParams {
+ constexpr CopyParams(u32 source_x, u32 source_y, u32 source_z, u32 dest_x, u32 dest_y,
+ u32 dest_z, u32 source_level, u32 dest_level, u32 width, u32 height,
+ u32 depth)
+ : source_x{source_x}, source_y{source_y}, source_z{source_z}, dest_x{dest_x},
+ dest_y{dest_y}, dest_z{dest_z}, source_level{source_level},
+ dest_level{dest_level}, width{width}, height{height}, depth{depth} {}
+
+ constexpr CopyParams(u32 width, u32 height, u32 depth, u32 level)
+ : source_x{}, source_y{}, source_z{}, dest_x{}, dest_y{}, dest_z{}, source_level{level},
+ dest_level{level}, width{width}, height{height}, depth{depth} {}
+
+ u32 source_x;
+ u32 source_y;
+ u32 source_z;
+ u32 dest_x;
+ u32 dest_y;
+ u32 dest_z;
+ u32 source_level;
+ u32 dest_level;
+ u32 width;
+ u32 height;
+ u32 depth;
+};
+
+} // namespace VideoCommon
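The two constructors above encode two cases: a sub-region copy with explicit source/destination offsets and levels, and a whole-level copy where the offsets are zero and both levels match. A minimal sketch of how a caller might build each, using hypothetical dimensions (the 256x256, four-layer texture is illustrative only and not part of the patch):

#include <algorithm>
#include <vector>

#include "video_core/texture_cache/copy_params.h"

namespace {

// Whole-level form: one CopyParams per mipmap level of a hypothetical
// 256x256 texture with four array layers (the depth field carries the layer count).
std::vector<VideoCommon::CopyParams> BuildWholeLevelCopies() {
    std::vector<VideoCommon::CopyParams> result;
    for (u32 level = 0; level < 4; ++level) {
        const u32 width = std::max(1U, 256U >> level);
        const u32 height = std::max(1U, 256U >> level);
        result.emplace_back(width, height, 4, level);
    }
    return result;
}

// Sub-region form: copy a 64x64 block at (32, 32) of level 1 into the origin of level 0.
VideoCommon::CopyParams BuildSubRegionCopy() {
    return VideoCommon::CopyParams(32, 32, 0,  // source x/y/z
                                   0, 0, 0,    // destination x/y/z
                                   1, 0,       // source level, destination level
                                   64, 64, 1); // width, height, depth
}

} // Anonymous namespace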
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
new file mode 100644
index 000000000..7a0fdb19b
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -0,0 +1,300 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "video_core/memory_manager.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/textures/convert.h"
+
+namespace VideoCommon {
+
+MICROPROFILE_DEFINE(GPU_Load_Texture, "GPU", "Texture Load", MP_RGB(128, 192, 128));
+MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192, 128));
+
+using Tegra::Texture::ConvertFromGuestToHost;
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceCompression;
+
+StagingCache::StagingCache() = default;
+
+StagingCache::~StagingCache() = default;
+
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
+ : params{params}, mipmap_sizes(params.num_levels),
+ mipmap_offsets(params.num_levels), gpu_addr{gpu_addr}, host_memory_size{
+ params.GetHostSizeInBytes()} {
+ std::size_t offset = 0;
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
+ mipmap_sizes[level] = mipmap_size;
+ mipmap_offsets[level] = offset;
+ offset += mipmap_size;
+ }
+ layer_size = offset;
+ if (params.is_layered) {
+ if (params.is_tiled) {
+ layer_size =
+ SurfaceParams::AlignLayered(layer_size, params.block_height, params.block_depth);
+ }
+ guest_memory_size = layer_size * params.depth;
+ } else {
+ guest_memory_size = layer_size;
+ }
+}
+
+MatchTopologyResult SurfaceBaseImpl::MatchesTopology(const SurfaceParams& rhs) const {
+ const u32 src_bpp{params.GetBytesPerPixel()};
+ const u32 dst_bpp{rhs.GetBytesPerPixel()};
+ const bool ib1 = params.IsBuffer();
+ const bool ib2 = rhs.IsBuffer();
+ if (std::tie(src_bpp, params.is_tiled, ib1) == std::tie(dst_bpp, rhs.is_tiled, ib2)) {
+ const bool cb1 = params.IsCompressed();
+ const bool cb2 = rhs.IsCompressed();
+ if (cb1 == cb2) {
+ return MatchTopologyResult::FullMatch;
+ }
+ return MatchTopologyResult::CompressUnmatch;
+ }
+ return MatchTopologyResult::None;
+}
+
+MatchStructureResult SurfaceBaseImpl::MatchesStructure(const SurfaceParams& rhs) const {
+ // Buffer surface Check
+ if (params.IsBuffer()) {
+ const std::size_t wd1 = params.width * params.GetBytesPerPixel();
+ const std::size_t wd2 = rhs.width * rhs.GetBytesPerPixel();
+ if (wd1 == wd2) {
+ return MatchStructureResult::FullMatch;
+ }
+ return MatchStructureResult::None;
+ }
+
+ // Linear Surface check
+ if (!params.is_tiled) {
+ if (std::tie(params.width, params.height, params.pitch) ==
+ std::tie(rhs.width, rhs.height, rhs.pitch)) {
+ return MatchStructureResult::FullMatch;
+ }
+ return MatchStructureResult::None;
+ }
+
+ // Tiled Surface check
+ if (std::tie(params.depth, params.block_width, params.block_height, params.block_depth,
+ params.tile_width_spacing, params.num_levels) ==
+ std::tie(rhs.depth, rhs.block_width, rhs.block_height, rhs.block_depth,
+ rhs.tile_width_spacing, rhs.num_levels)) {
+ if (std::tie(params.width, params.height) == std::tie(rhs.width, rhs.height)) {
+ return MatchStructureResult::FullMatch;
+ }
+ const u32 ws = SurfaceParams::ConvertWidth(rhs.GetBlockAlignedWidth(), params.pixel_format,
+ rhs.pixel_format);
+ const u32 hs =
+ SurfaceParams::ConvertHeight(rhs.height, params.pixel_format, rhs.pixel_format);
+ const u32 w1 = params.GetBlockAlignedWidth();
+ if (std::tie(w1, params.height) == std::tie(ws, hs)) {
+ return MatchStructureResult::SemiMatch;
+ }
+ }
+ return MatchStructureResult::None;
+}
+
+std::optional<std::pair<u32, u32>> SurfaceBaseImpl::GetLayerMipmap(
+ const GPUVAddr candidate_gpu_addr) const {
+ if (gpu_addr == candidate_gpu_addr) {
+ return {{0, 0}};
+ }
+ if (candidate_gpu_addr < gpu_addr) {
+ return {};
+ }
+ const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
+ const auto layer{static_cast<u32>(relative_address / layer_size)};
+ const GPUVAddr mipmap_address = relative_address - layer_size * layer;
+ const auto mipmap_it =
+ Common::BinaryFind(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
+ if (mipmap_it == mipmap_offsets.end()) {
+ return {};
+ }
+ const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
+ return std::make_pair(layer, level);
+}
+
+std::vector<CopyParams> SurfaceBaseImpl::BreakDownLayered(const SurfaceParams& in_params) const {
+ const u32 layers{params.depth};
+ const u32 mipmaps{params.num_levels};
+ std::vector<CopyParams> result;
+ result.reserve(static_cast<std::size_t>(layers) * static_cast<std::size_t>(mipmaps));
+
+ for (u32 layer = 0; layer < layers; layer++) {
+ for (u32 level = 0; level < mipmaps; level++) {
+ const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
+ const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
+ result.emplace_back(width, height, layer, level);
+ }
+ }
+ return result;
+}
+
+std::vector<CopyParams> SurfaceBaseImpl::BreakDownNonLayered(const SurfaceParams& in_params) const {
+ const u32 mipmaps{params.num_levels};
+ std::vector<CopyParams> result;
+ result.reserve(mipmaps);
+
+ for (u32 level = 0; level < mipmaps; level++) {
+ const u32 width = SurfaceParams::IntersectWidth(params, in_params, level, level);
+ const u32 height = SurfaceParams::IntersectHeight(params, in_params, level, level);
+ const u32 depth{std::min(params.GetMipDepth(level), in_params.GetMipDepth(level))};
+ result.emplace_back(width, height, depth, level);
+ }
+ return result;
+}
+
+void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params,
+ u8* buffer, u32 level) {
+ const u32 width{params.GetMipWidth(level)};
+ const u32 height{params.GetMipHeight(level)};
+ const u32 block_height{params.GetMipBlockHeight(level)};
+ const u32 block_depth{params.GetMipBlockDepth(level)};
+
+ std::size_t guest_offset{mipmap_offsets[level]};
+ if (params.is_layered) {
+ std::size_t host_offset{0};
+ const std::size_t guest_stride = layer_size;
+ const std::size_t host_stride = params.GetHostLayerSize(level);
+ for (u32 layer = 0; layer < params.depth; ++layer) {
+ MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth, 1,
+ params.tile_width_spacing, buffer + host_offset, memory + guest_offset);
+ guest_offset += guest_stride;
+ host_offset += host_stride;
+ }
+ } else {
+ MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth,
+ params.GetMipDepth(level), params.tile_width_spacing, buffer,
+ memory + guest_offset);
+ }
+}
+
+void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
+ StagingCache& staging_cache) {
+ MICROPROFILE_SCOPE(GPU_Load_Texture);
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ u8* host_ptr;
+ is_continuous = memory_manager.IsBlockContinuous(gpu_addr, guest_memory_size);
+
+ // Handle continuity
+ if (is_continuous) {
+ // Use physical memory directly
+ host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return;
+ }
+ } else {
+ // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
+ memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+ }
+
+ if (params.is_tiled) {
+ ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
+ params.block_width, static_cast<u32>(params.target));
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
+ staging_buffer.data() + host_offset, level);
+ }
+ } else {
+ ASSERT_MSG(params.num_levels == 1, "Linear mipmap loading is not implemented");
+ const u32 bpp{params.GetBytesPerPixel()};
+ const u32 block_width{params.GetDefaultBlockWidth()};
+ const u32 block_height{params.GetDefaultBlockHeight()};
+ const u32 width{(params.width + block_width - 1) / block_width};
+ const u32 height{(params.height + block_height - 1) / block_height};
+ const u32 copy_size{width * bpp};
+ if (params.pitch == copy_size) {
+ std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
+ } else {
+ const u8* start{host_ptr};
+ u8* write_to{staging_buffer.data()};
+ for (u32 h = height; h > 0; --h) {
+ std::memcpy(write_to, start, copy_size);
+ start += params.pitch;
+ write_to += copy_size;
+ }
+ }
+ }
+
+ auto compression_type = params.GetCompressionType();
+ if (compression_type == SurfaceCompression::None ||
+ compression_type == SurfaceCompression::Compressed)
+ return;
+
+ for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
+ const u32 level = level_up - 1;
+ const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
+ const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
+ ? in_host_offset
+ : params.GetConvertedMipmapOffset(level);
+ u8* in_buffer = staging_buffer.data() + in_host_offset;
+ u8* out_buffer = staging_buffer.data() + out_host_offset;
+ ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
+ params.GetMipWidth(level), params.GetMipHeight(level),
+ params.GetMipDepth(level), true, true);
+ }
+}
+
+void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
+ StagingCache& staging_cache) {
+ MICROPROFILE_SCOPE(GPU_Flush_Texture);
+ auto& staging_buffer = staging_cache.GetBuffer(0);
+ u8* host_ptr;
+
+ // Handle continuity
+ if (is_continuous) {
+ // Use physical memory directly
+ host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return;
+ }
+ } else {
+ // Use an extra temporary buffer
+ auto& tmp_buffer = staging_cache.GetBuffer(1);
+ tmp_buffer.resize(guest_memory_size);
+ host_ptr = tmp_buffer.data();
+ }
+
+ if (params.is_tiled) {
+ ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
+ for (u32 level = 0; level < params.num_levels; ++level) {
+ const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
+ SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
+ staging_buffer.data() + host_offset, level);
+ }
+ } else {
+ ASSERT(params.target == SurfaceTarget::Texture2D);
+ ASSERT(params.num_levels == 1);
+
+ const u32 bpp{params.GetBytesPerPixel()};
+ const u32 copy_size{params.width * bpp};
+ if (params.pitch == copy_size) {
+ std::memcpy(host_ptr, staging_buffer.data(), guest_memory_size);
+ } else {
+ u8* start{host_ptr};
+ const u8* read_to{staging_buffer.data()};
+ for (u32 h = params.height; h > 0; --h) {
+ std::memcpy(start, read_to, copy_size);
+ start += params.pitch;
+ read_to += copy_size;
+ }
+ }
+ }
+ if (!is_continuous) {
+ memory_manager.WriteBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+ }
+}
+
+} // namespace VideoCommon
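GetLayerMipmap above turns a candidate GPU address into a (layer, level) pair: the layer is the quotient of the relative offset by the per-layer size, and the level comes from a binary search over the precomputed, sorted mipmap offsets (Common::BinaryFind is a lower_bound plus an equality check). A self-contained sketch of the same lookup over a hypothetical layout; the Layout struct and its field names are assumptions made for illustration:

#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Hypothetical stand-in for the surface's precomputed layout. Assumes layer_size > 0
// and that candidate_addr lies within the surface.
struct Layout {
    std::uint64_t base_addr;
    std::uint64_t layer_size;
    std::vector<std::uint64_t> mipmap_offsets; // sorted, one entry per level
};

std::optional<std::pair<std::uint32_t, std::uint32_t>> ResolveLayerLevel(
    const Layout& layout, std::uint64_t candidate_addr) {
    if (candidate_addr < layout.base_addr) {
        return std::nullopt; // An address before the surface cannot be a view of it.
    }
    const std::uint64_t relative = candidate_addr - layout.base_addr;
    const auto layer = static_cast<std::uint32_t>(relative / layout.layer_size);
    const std::uint64_t mip_addr = relative - layer * layout.layer_size;
    // The offsets are sorted, so the exact boundary can be found with a binary search.
    const auto it = std::lower_bound(layout.mipmap_offsets.begin(),
                                     layout.mipmap_offsets.end(), mip_addr);
    if (it == layout.mipmap_offsets.end() || *it != mip_addr) {
        return std::nullopt; // Not aligned to a mipmap boundary within the layer.
    }
    const auto level =
        static_cast<std::uint32_t>(std::distance(layout.mipmap_offsets.begin(), it));
    return std::make_pair(layer, level);
}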
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
new file mode 100644
index 000000000..8ba386a8a
--- /dev/null
+++ b/src/video_core/texture_cache/surface_base.h
@@ -0,0 +1,317 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/binary_find.h"
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+#include "video_core/morton.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace VideoCommon {
+
+using VideoCore::MortonSwizzleMode;
+using VideoCore::Surface::SurfaceTarget;
+
+enum class MatchStructureResult : u32 {
+ FullMatch = 0,
+ SemiMatch = 1,
+ None = 2,
+};
+
+enum class MatchTopologyResult : u32 {
+ FullMatch = 0,
+ CompressUnmatch = 1,
+ None = 2,
+};
+
+class StagingCache {
+public:
+ explicit StagingCache();
+ ~StagingCache();
+
+ std::vector<u8>& GetBuffer(std::size_t index) {
+ return staging_buffer[index];
+ }
+
+ const std::vector<u8>& GetBuffer(std::size_t index) const {
+ return staging_buffer[index];
+ }
+
+ void SetSize(std::size_t size) {
+ staging_buffer.resize(size);
+ }
+
+private:
+ std::vector<std::vector<u8>> staging_buffer;
+};
+
+class SurfaceBaseImpl {
+public:
+ void LoadBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
+
+ void FlushBuffer(Tegra::MemoryManager& memory_manager, StagingCache& staging_cache);
+
+ GPUVAddr GetGpuAddr() const {
+ return gpu_addr;
+ }
+
+ bool Overlaps(const CacheAddr start, const CacheAddr end) const {
+ return (cache_addr < end) && (cache_addr_end > start);
+ }
+
+ bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
+ const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
+ return (gpu_addr <= other_start && other_end <= gpu_addr_end);
+ }
+
+ // Use only when recycling a surface
+ void SetGpuAddr(const GPUVAddr new_addr) {
+ gpu_addr = new_addr;
+ }
+
+ VAddr GetCpuAddr() const {
+ return cpu_addr;
+ }
+
+ void SetCpuAddr(const VAddr new_addr) {
+ cpu_addr = new_addr;
+ }
+
+ CacheAddr GetCacheAddr() const {
+ return cache_addr;
+ }
+
+ CacheAddr GetCacheAddrEnd() const {
+ return cache_addr_end;
+ }
+
+ void SetCacheAddr(const CacheAddr new_addr) {
+ cache_addr = new_addr;
+ cache_addr_end = new_addr + guest_memory_size;
+ }
+
+ const SurfaceParams& GetSurfaceParams() const {
+ return params;
+ }
+
+ std::size_t GetSizeInBytes() const {
+ return guest_memory_size;
+ }
+
+ std::size_t GetHostSizeInBytes() const {
+ return host_memory_size;
+ }
+
+ std::size_t GetMipmapSize(const u32 level) const {
+ return mipmap_sizes[level];
+ }
+
+ void MarkAsContinuous(const bool is_continuous) {
+ this->is_continuous = is_continuous;
+ }
+
+ bool IsContinuous() const {
+ return is_continuous;
+ }
+
+ bool IsLinear() const {
+ return !params.is_tiled;
+ }
+
+ bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
+ return params.pixel_format == pixel_format;
+ }
+
+ VideoCore::Surface::PixelFormat GetFormat() const {
+ return params.pixel_format;
+ }
+
+ bool MatchTarget(VideoCore::Surface::SurfaceTarget target) const {
+ return params.target == target;
+ }
+
+ MatchTopologyResult MatchesTopology(const SurfaceParams& rhs) const;
+
+ MatchStructureResult MatchesStructure(const SurfaceParams& rhs) const;
+
+ bool MatchesSubTexture(const SurfaceParams& rhs, const GPUVAddr other_gpu_addr) const {
+ return std::tie(gpu_addr, params.target, params.num_levels) ==
+ std::tie(other_gpu_addr, rhs.target, rhs.num_levels) &&
+ params.target == SurfaceTarget::Texture2D && params.num_levels == 1;
+ }
+
+ std::optional<std::pair<u32, u32>> GetLayerMipmap(const GPUVAddr candidate_gpu_addr) const;
+
+ std::vector<CopyParams> BreakDown(const SurfaceParams& in_params) const {
+ return params.is_layered ? BreakDownLayered(in_params) : BreakDownNonLayered(in_params);
+ }
+
+protected:
+ explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
+ ~SurfaceBaseImpl() = default;
+
+ virtual void DecorateSurfaceName() = 0;
+
+ const SurfaceParams params;
+ std::size_t layer_size;
+ std::size_t guest_memory_size;
+ const std::size_t host_memory_size;
+ GPUVAddr gpu_addr{};
+ CacheAddr cache_addr{};
+ CacheAddr cache_addr_end{};
+ VAddr cpu_addr{};
+ bool is_continuous{};
+
+ std::vector<std::size_t> mipmap_sizes;
+ std::vector<std::size_t> mipmap_offsets;
+
+private:
+ void SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params, u8* buffer,
+ u32 level);
+
+ std::vector<CopyParams> BreakDownLayered(const SurfaceParams& in_params) const;
+
+ std::vector<CopyParams> BreakDownNonLayered(const SurfaceParams& in_params) const;
+};
+
+template <typename TView>
+class SurfaceBase : public SurfaceBaseImpl {
+public:
+ virtual void UploadTexture(const std::vector<u8>& staging_buffer) = 0;
+
+ virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0;
+
+ void MarkAsModified(const bool is_modified_, const u64 tick) {
+ is_modified = is_modified_ || is_target;
+ modification_tick = tick;
+ }
+
+ void MarkAsRenderTarget(const bool is_target) {
+ this->is_target = is_target;
+ }
+
+ void MarkAsPicked(const bool is_picked) {
+ this->is_picked = is_picked;
+ }
+
+ bool IsModified() const {
+ return is_modified;
+ }
+
+ bool IsProtected() const {
+ // Only 3D Slices are to be protected
+ return is_target && params.block_depth > 0;
+ }
+
+ bool IsRenderTarget() const {
+ return is_target;
+ }
+
+ bool IsRegistered() const {
+ return is_registered;
+ }
+
+ bool IsPicked() const {
+ return is_picked;
+ }
+
+ void MarkAsRegistered(bool is_reg) {
+ is_registered = is_reg;
+ }
+
+ u64 GetModificationTick() const {
+ return modification_tick;
+ }
+
+ TView EmplaceOverview(const SurfaceParams& overview_params) {
+ const u32 num_layers{(params.is_layered && !overview_params.is_layered) ? 1 : params.depth};
+ return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels));
+ }
+
+ std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params,
+ const GPUVAddr view_addr,
+ const std::size_t candidate_size, const u32 mipmap,
+ const u32 layer) {
+ const auto layer_mipmap{GetLayerMipmap(view_addr + candidate_size)};
+ if (!layer_mipmap) {
+ return {};
+ }
+ const u32 end_layer{layer_mipmap->first};
+ const u32 end_mipmap{layer_mipmap->second};
+ if (layer != end_layer) {
+ if (mipmap == 0 && end_mipmap == 0) {
+ return GetView(ViewParams(view_params.target, layer, end_layer - layer + 1, 0, 1));
+ }
+ return {};
+ } else {
+ return GetView(
+ ViewParams(view_params.target, layer, 1, mipmap, end_mipmap - mipmap + 1));
+ }
+ }
+
+ std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr,
+ const std::size_t candidate_size) {
+ if (params.target == SurfaceTarget::Texture3D ||
+ (params.num_levels == 1 && !params.is_layered) ||
+ view_params.target == SurfaceTarget::Texture3D) {
+ return {};
+ }
+ const auto layer_mipmap{GetLayerMipmap(view_addr)};
+ if (!layer_mipmap) {
+ return {};
+ }
+ const u32 layer{layer_mipmap->first};
+ const u32 mipmap{layer_mipmap->second};
+ if (GetMipmapSize(mipmap) != candidate_size) {
+ return EmplaceIrregularView(view_params, view_addr, candidate_size, mipmap, layer);
+ }
+ return GetView(ViewParams(view_params.target, layer, 1, mipmap, 1));
+ }
+
+ TView GetMainView() const {
+ return main_view;
+ }
+
+protected:
+ explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
+ : SurfaceBaseImpl(gpu_addr, params) {}
+
+ ~SurfaceBase() = default;
+
+ virtual TView CreateView(const ViewParams& view_key) = 0;
+
+ TView main_view;
+ std::unordered_map<ViewParams, TView> views;
+
+private:
+ TView GetView(const ViewParams& key) {
+ const auto [entry, is_cache_miss] = views.try_emplace(key);
+ auto& view{entry->second};
+ if (is_cache_miss) {
+ view = CreateView(key);
+ }
+ return view;
+ }
+
+ bool is_modified{};
+ bool is_target{};
+ bool is_registered{};
+ bool is_picked{};
+ u64 modification_tick{};
+};
+
+} // namespace VideoCommon
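SurfaceBase::GetView above defers view construction to the derived class: try_emplace inserts an empty slot keyed by ViewParams, and only on a cache miss is CreateView called to fill it, so each distinct view is built exactly once per surface. The same create-on-miss pattern, reduced to a self-contained sketch; the Key type, the string payload, and the callback standing in for the virtual CreateView are all hypothetical:

#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

// Hypothetical key standing in for ViewParams.
struct Key {
    int base_layer;
    int base_level;
    bool operator==(const Key& rhs) const {
        return base_layer == rhs.base_layer && base_level == rhs.base_level;
    }
};

struct KeyHash {
    std::size_t operator()(const Key& k) const noexcept {
        return std::hash<int>{}(k.base_layer) ^ (std::hash<int>{}(k.base_level) << 1);
    }
};

class ViewCache {
public:
    // Creation is delegated to the caller, mirroring the pure virtual CreateView.
    explicit ViewCache(std::function<std::string(const Key&)> create)
        : create_{std::move(create)} {}

    const std::string& Get(const Key& key) {
        const auto [it, is_cache_miss] = views_.try_emplace(key);
        if (is_cache_miss) {
            it->second = create_(key); // Only build the view the first time it is requested.
        }
        return it->second;
    }

private:
    std::function<std::string(const Key&)> create_;
    std::unordered_map<Key, std::string, KeyHash> views_;
};

The real cache keys on ViewParams and stores backend view objects; the string payload here is only a placeholder.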
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
new file mode 100644
index 000000000..9c56e2b4f
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -0,0 +1,334 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <map>
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "core/core.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::ComponentTypeFromDepthFormat;
+using VideoCore::Surface::ComponentTypeFromRenderTarget;
+using VideoCore::Surface::ComponentTypeFromTexture;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::PixelFormatFromDepthFormat;
+using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
+using VideoCore::Surface::PixelFormatFromTextureFormat;
+using VideoCore::Surface::SurfaceTarget;
+using VideoCore::Surface::SurfaceTargetFromTextureType;
+using VideoCore::Surface::SurfaceType;
+
+SurfaceTarget TextureType2SurfaceTarget(Tegra::Shader::TextureType type, bool is_array) {
+ switch (type) {
+ case Tegra::Shader::TextureType::Texture1D: {
+ if (is_array)
+ return SurfaceTarget::Texture1DArray;
+ else
+ return SurfaceTarget::Texture1D;
+ }
+ case Tegra::Shader::TextureType::Texture2D: {
+ if (is_array)
+ return SurfaceTarget::Texture2DArray;
+ else
+ return SurfaceTarget::Texture2D;
+ }
+ case Tegra::Shader::TextureType::Texture3D: {
+ ASSERT(!is_array);
+ return SurfaceTarget::Texture3D;
+ }
+ case Tegra::Shader::TextureType::TextureCube: {
+ if (is_array)
+ return SurfaceTarget::TextureCubeArray;
+ else
+ return SurfaceTarget::TextureCubemap;
+ }
+ default: {
+ UNREACHABLE();
+ return SurfaceTarget::Texture2D;
+ }
+ }
+}
+
+namespace {
+constexpr u32 GetMipmapSize(bool uncompressed, u32 mip_size, u32 tile) {
+ return uncompressed ? mip_size : std::max(1U, (mip_size + tile - 1) / tile);
+}
+} // Anonymous namespace
+
+SurfaceParams SurfaceParams::CreateForTexture(Core::System& system,
+ const Tegra::Texture::FullTextureInfo& config,
+ const VideoCommon::Shader::Sampler& entry) {
+ SurfaceParams params;
+ params.is_tiled = config.tic.IsTiled();
+ params.srgb_conversion = config.tic.IsSrgbConversionEnabled();
+ params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
+ params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
+ params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
+ params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
+ params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(),
+ params.srgb_conversion);
+ params.type = GetFormatType(params.pixel_format);
+ if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+ switch (params.pixel_format) {
+ case PixelFormat::R16U:
+ case PixelFormat::R16F: {
+ params.pixel_format = PixelFormat::Z16;
+ break;
+ }
+ case PixelFormat::R32F: {
+ params.pixel_format = PixelFormat::Z32F;
+ break;
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Unimplemented shadow convert format: {}",
+ static_cast<u32>(params.pixel_format));
+ }
+ }
+ params.type = GetFormatType(params.pixel_format);
+ }
+ params.component_type = ComponentTypeFromTexture(config.tic.r_type.Value());
+ params.type = GetFormatType(params.pixel_format);
+ // TODO: on 1DBuffer we should use the tic info.
+ if (!config.tic.IsBuffer()) {
+ params.target = TextureType2SurfaceTarget(entry.GetType(), entry.IsArray());
+ params.width = config.tic.Width();
+ params.height = config.tic.Height();
+ params.depth = config.tic.Depth();
+ params.pitch = params.is_tiled ? 0 : config.tic.Pitch();
+ if (params.target == SurfaceTarget::TextureCubemap ||
+ params.target == SurfaceTarget::TextureCubeArray) {
+ params.depth *= 6;
+ }
+ params.num_levels = config.tic.max_mip_level + 1;
+ params.emulated_levels = std::min(params.num_levels, params.MaxPossibleMipmap());
+ params.is_layered = params.IsLayered();
+ } else {
+ params.target = SurfaceTarget::TextureBuffer;
+ params.width = config.tic.Width();
+ params.pitch = params.width * params.GetBytesPerPixel();
+ params.height = 1;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ }
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForDepthBuffer(
+ Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
+ u32 block_width, u32 block_height, u32 block_depth,
+ Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type) {
+ SurfaceParams params;
+ params.is_tiled = type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
+ params.srgb_conversion = false;
+ params.block_width = std::min(block_width, 5U);
+ params.block_height = std::min(block_height, 5U);
+ params.block_depth = std::min(block_depth, 5U);
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromDepthFormat(format);
+ params.component_type = ComponentTypeFromDepthFormat(format);
+ params.type = GetFormatType(params.pixel_format);
+ params.width = zeta_width;
+ params.height = zeta_height;
+ params.target = SurfaceTarget::Texture2D;
+ params.depth = 1;
+ params.pitch = 0;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::size_t index) {
+ const auto& config{system.GPU().Maxwell3D().regs.rt[index]};
+ SurfaceParams params;
+ params.is_tiled =
+ config.memory_layout.type == Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
+ params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
+ config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
+ params.block_width = config.memory_layout.block_width;
+ params.block_height = config.memory_layout.block_height;
+ params.block_depth = config.memory_layout.block_depth;
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
+ params.component_type = ComponentTypeFromRenderTarget(config.format);
+ params.type = GetFormatType(params.pixel_format);
+ if (params.is_tiled) {
+ params.pitch = 0;
+ params.width = config.width;
+ } else {
+ const u32 bpp = GetFormatBpp(params.pixel_format) / CHAR_BIT;
+ params.pitch = config.width;
+ params.width = params.pitch / bpp;
+ }
+ params.height = config.height;
+ params.depth = 1;
+ params.target = SurfaceTarget::Texture2D;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = false;
+ return params;
+}
+
+SurfaceParams SurfaceParams::CreateForFermiCopySurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+ SurfaceParams params{};
+ params.is_tiled = !config.linear;
+ params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
+ config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
+ params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 5U) : 0,
+ params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 5U) : 0,
+ params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 5U) : 0,
+ params.tile_width_spacing = 1;
+ params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
+ params.component_type = ComponentTypeFromRenderTarget(config.format);
+ params.type = GetFormatType(params.pixel_format);
+ params.width = config.width;
+ params.height = config.height;
+ params.pitch = config.pitch;
+ // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters
+ params.target = SurfaceTarget::Texture2D;
+ params.depth = 1;
+ params.num_levels = 1;
+ params.emulated_levels = 1;
+ params.is_layered = params.IsLayered();
+ return params;
+}
+
+bool SurfaceParams::IsLayered() const {
+ switch (target) {
+ case SurfaceTarget::Texture1DArray:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::TextureCubeArray:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Auto block resizing algorithm from:
+// https://cgit.freedesktop.org/mesa/mesa/tree/src/gallium/drivers/nouveau/nv50/nv50_miptree.c
+u32 SurfaceParams::GetMipBlockHeight(u32 level) const {
+ if (level == 0) {
+ return this->block_height;
+ }
+
+ const u32 height_new{GetMipHeight(level)};
+ const u32 default_block_height{GetDefaultBlockHeight()};
+ const u32 blocks_in_y{(height_new + default_block_height - 1) / default_block_height};
+ const u32 block_height_new = Common::Log2Ceil32(blocks_in_y);
+ return std::clamp(block_height_new, 3U, 7U) - 3U;
+}
+
+u32 SurfaceParams::GetMipBlockDepth(u32 level) const {
+ if (level == 0) {
+ return this->block_depth;
+ }
+ if (is_layered) {
+ return 0;
+ }
+
+ const u32 depth_new{GetMipDepth(level)};
+ const u32 block_depth_new = Common::Log2Ceil32(depth_new);
+ if (block_depth_new > 4) {
+ return 5 - (GetMipBlockHeight(level) >= 2);
+ }
+ return block_depth_new;
+}
+
+std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, false, false);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
+ std::size_t offset = 0;
+ for (u32 i = 0; i < level; i++) {
+ offset += GetConvertedMipmapSize(i);
+ }
+ return offset;
+}
+
+std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
+ constexpr std::size_t rgba8_bpp = 4ULL;
+ const std::size_t width_t = GetMipWidth(level);
+ const std::size_t height_t = GetMipHeight(level);
+ const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
+ return width_t * height_t * depth_t * rgba8_bpp;
+}
+
+std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
+ std::size_t size = 0;
+ for (u32 level = 0; level < num_levels; ++level) {
+ size += GetInnerMipmapMemorySize(level, as_host_size, uncompressed);
+ }
+ if (is_tiled && is_layered) {
+ return Common::AlignBits(size,
+ Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
+ }
+ return size;
+}
+
+std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size,
+ bool uncompressed) const {
+ const bool tiled{as_host_size ? false : is_tiled};
+ const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), GetDefaultBlockWidth())};
+ const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), GetDefaultBlockHeight())};
+ const u32 depth{is_layered ? 1U : GetMipDepth(level)};
+ return Tegra::Texture::CalculateSize(tiled, GetBytesPerPixel(), width, height, depth,
+ GetMipBlockHeight(level), GetMipBlockDepth(level));
+}
+
+bool SurfaceParams::operator==(const SurfaceParams& rhs) const {
+ return std::tie(is_tiled, block_width, block_height, block_depth, tile_width_spacing, width,
+ height, depth, pitch, num_levels, pixel_format, component_type, type, target) ==
+ std::tie(rhs.is_tiled, rhs.block_width, rhs.block_height, rhs.block_depth,
+ rhs.tile_width_spacing, rhs.width, rhs.height, rhs.depth, rhs.pitch,
+ rhs.num_levels, rhs.pixel_format, rhs.component_type, rhs.type, rhs.target);
+}
+
+std::string SurfaceParams::TargetName() const {
+ switch (target) {
+ case SurfaceTarget::Texture1D:
+ return "1D";
+ case SurfaceTarget::TextureBuffer:
+ return "TexBuffer";
+ case SurfaceTarget::Texture2D:
+ return "2D";
+ case SurfaceTarget::Texture3D:
+ return "3D";
+ case SurfaceTarget::Texture1DArray:
+ return "1DArray";
+ case SurfaceTarget::Texture2DArray:
+ return "2DArray";
+ case SurfaceTarget::TextureCubemap:
+ return "Cube";
+ case SurfaceTarget::TextureCubeArray:
+ return "CubeArray";
+ default:
+ LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
+ UNREACHABLE();
+ return fmt::format("TUK({})", static_cast<u32>(target));
+ }
+}
+
+} // namespace VideoCommon
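GetMipBlockHeight above follows the nv50 miptree heuristic linked in the comment: the mip height is converted to block rows, the row count is rounded up to the next power-of-two exponent, and that exponent is clamped to the 3..7 range before being rebased to the 0..4 encoding stored in block_height. A stand-alone sketch with a worked trace; the 512-high, uncompressed input is an assumed example:

#include <algorithm>
#include <cstdint>

// Ceiling log2 for 32-bit values, mirroring Common::Log2Ceil32 for inputs >= 1.
std::uint32_t Log2Ceil32(std::uint32_t value) {
    std::uint32_t result = 0;
    while ((1ULL << result) < value) {
        ++result;
    }
    return result;
}

// Stand-alone version of the auto block-height selection for levels > 0.
std::uint32_t MipBlockHeight(std::uint32_t base_height, std::uint32_t default_block_height,
                             std::uint32_t level) {
    const std::uint32_t mip_height = std::max(1U, base_height >> level);
    const std::uint32_t blocks_in_y =
        (mip_height + default_block_height - 1) / default_block_height;
    const std::uint32_t block_height = Log2Ceil32(blocks_in_y);
    // Clamp the exponent to 3..7 and rebase so the result fits the 0..4 block-height encoding.
    return std::clamp(block_height, 3U, 7U) - 3U;
}

// Worked trace: base_height = 512, default_block_height = 1, level = 4
//   mip_height = 32, blocks_in_y = 32, Log2Ceil32(32) = 5, clamp(5, 3, 7) - 3 = 2.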
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
new file mode 100644
index 000000000..358d6757c
--- /dev/null
+++ b/src/video_core/texture_cache/surface_params.h
@@ -0,0 +1,286 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <map>
+
+#include "common/alignment.h"
+#include "common/bit_util.h"
+#include "common/cityhash.h"
+#include "common/common_types.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/shader/shader_ir.h"
+#include "video_core/surface.h"
+#include "video_core/textures/decoders.h"
+
+namespace VideoCommon {
+
+using VideoCore::Surface::SurfaceCompression;
+
+class SurfaceParams {
+public:
+ /// Creates SurfaceParams from a texture configuration.
+ static SurfaceParams CreateForTexture(Core::System& system,
+ const Tegra::Texture::FullTextureInfo& config,
+ const VideoCommon::Shader::Sampler& entry);
+
+ /// Creates SurfaceParams for a depth buffer configuration.
+ static SurfaceParams CreateForDepthBuffer(
+ Core::System& system, u32 zeta_width, u32 zeta_height, Tegra::DepthFormat format,
+ u32 block_width, u32 block_height, u32 block_depth,
+ Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout type);
+
+ /// Creates SurfaceParams from a framebuffer configuration.
+ static SurfaceParams CreateForFramebuffer(Core::System& system, std::size_t index);
+
+ /// Creates SurfaceParams from a Fermi2D surface configuration.
+ static SurfaceParams CreateForFermiCopySurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config);
+
+ std::size_t Hash() const {
+ return static_cast<std::size_t>(
+ Common::CityHash64(reinterpret_cast<const char*>(this), sizeof(*this)));
+ }
+
+ bool operator==(const SurfaceParams& rhs) const;
+
+ bool operator!=(const SurfaceParams& rhs) const {
+ return !operator==(rhs);
+ }
+
+ std::size_t GetGuestSizeInBytes() const {
+ return GetInnerMemorySize(false, false, false);
+ }
+
+ std::size_t GetHostSizeInBytes() const {
+ std::size_t host_size_in_bytes;
+ if (GetCompressionType() == SurfaceCompression::Converted) {
+ constexpr std::size_t rgb8_bpp = 4ULL;
+ // ASTC is decompressed in software and emulated as RGBA8
+ host_size_in_bytes = 0;
+ for (u32 level = 0; level < num_levels; ++level) {
+ host_size_in_bytes += GetConvertedMipmapSize(level);
+ }
+ } else {
+ host_size_in_bytes = GetInnerMemorySize(true, false, false);
+ }
+ return host_size_in_bytes;
+ }
+
+ u32 GetBlockAlignedWidth() const {
+ return Common::AlignUp(width, 64 / GetBytesPerPixel());
+ }
+
+ /// Returns the width of a given mipmap level.
+ u32 GetMipWidth(u32 level) const {
+ return std::max(1U, width >> level);
+ }
+
+ /// Returns the height of a given mipmap level.
+ u32 GetMipHeight(u32 level) const {
+ return std::max(1U, height >> level);
+ }
+
+ /// Returns the depth of a given mipmap level.
+ u32 GetMipDepth(u32 level) const {
+ return is_layered ? depth : std::max(1U, depth >> level);
+ }
+
+ /// Returns the block height of a given mipmap level.
+ u32 GetMipBlockHeight(u32 level) const;
+
+ /// Returns the block depth of a given mipmap level.
+ u32 GetMipBlockDepth(u32 level) const;
+
+ /// Returns the best possible row/pitch alignment for the surface.
+ u32 GetRowAlignment(u32 level) const {
+ const u32 bpp =
+ GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
+ return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
+ }
+
+ /// Returns the offset in bytes in guest memory of a given mipmap level.
+ std::size_t GetGuestMipmapLevelOffset(u32 level) const;
+
+ /// Returns the offset in bytes in host memory (linear) of a given mipmap level.
+ std::size_t GetHostMipmapLevelOffset(u32 level) const;
+
+ /// Returns the offset in bytes in host memory (linear) of a given mipmap level
+ /// for a texture that is converted on the host GPU.
+ std::size_t GetConvertedMipmapOffset(u32 level) const;
+
+ /// Returns the size in bytes in guest memory of a given mipmap level.
+ std::size_t GetGuestMipmapSize(u32 level) const {
+ return GetInnerMipmapMemorySize(level, false, false);
+ }
+
+ /// Returns the size in bytes in host memory (linear) of a given mipmap level.
+ std::size_t GetHostMipmapSize(u32 level) const {
+ return GetInnerMipmapMemorySize(level, true, false) * GetNumLayers();
+ }
+
+ std::size_t GetConvertedMipmapSize(u32 level) const;
+
+ /// Returns the size of a layer in bytes in guest memory.
+ std::size_t GetGuestLayerSize() const {
+ return GetLayerSize(false, false);
+ }
+
+ /// Returns the size of a layer in bytes in host memory for a given mipmap level.
+ std::size_t GetHostLayerSize(u32 level) const {
+ ASSERT(target != VideoCore::Surface::SurfaceTarget::Texture3D);
+ return GetInnerMipmapMemorySize(level, true, false);
+ }
+
+ /// Returns the maximum number of mipmap levels the texture can have on the host GPU.
+ u32 MaxPossibleMipmap() const {
+ const u32 max_mipmap_w = Common::Log2Ceil32(width) + 1U;
+ const u32 max_mipmap_h = Common::Log2Ceil32(height) + 1U;
+ const u32 max_mipmap = std::max(max_mipmap_w, max_mipmap_h);
+ if (target != VideoCore::Surface::SurfaceTarget::Texture3D)
+ return max_mipmap;
+ return std::max(max_mipmap, Common::Log2Ceil32(depth) + 1U);
+ }
+
+ /// Returns true if the guest surface is a compressed surface.
+ bool IsCompressed() const {
+ return GetDefaultBlockHeight() > 1 || GetDefaultBlockWidth() > 1;
+ }
+
+ /// Returns the default block width.
+ u32 GetDefaultBlockWidth() const {
+ return VideoCore::Surface::GetDefaultBlockWidth(pixel_format);
+ }
+
+ /// Returns the default block height.
+ u32 GetDefaultBlockHeight() const {
+ return VideoCore::Surface::GetDefaultBlockHeight(pixel_format);
+ }
+
+ /// Returns the bits per pixel.
+ u32 GetBitsPerPixel() const {
+ return VideoCore::Surface::GetFormatBpp(pixel_format);
+ }
+
+ /// Returns the bytes per pixel.
+ u32 GetBytesPerPixel() const {
+ return VideoCore::Surface::GetBytesPerPixel(pixel_format);
+ }
+
+ /// Returns true if the pixel format is a depth and/or stencil format.
+ bool IsPixelFormatZeta() const {
+ return pixel_format >= VideoCore::Surface::PixelFormat::MaxColorFormat &&
+ pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
+ }
+
+ /// Returns how the compression should be handled for this texture.
+ SurfaceCompression GetCompressionType() const {
+ return VideoCore::Surface::GetFormatCompressionType(pixel_format);
+ }
+
+ /// Returns true if the surface is a TextureBuffer type of surface.
+ bool IsBuffer() const {
+ return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
+ }
+
+ /// Returns the debug name of the texture for use in graphic debuggers.
+ std::string TargetName() const;
+
+ // Helper used for out-of-class size calculations
+ static std::size_t AlignLayered(const std::size_t out_size, const u32 block_height,
+ const u32 block_depth) {
+ return Common::AlignBits(out_size,
+ Tegra::Texture::GetGOBSizeShift() + block_height + block_depth);
+ }
+
+ /// Converts a width from one pixel format into another. This helps represent the
+ /// equivalent value between compressed and non-compressed textures.
+ static u32 ConvertWidth(u32 width, VideoCore::Surface::PixelFormat pixel_format_from,
+ VideoCore::Surface::PixelFormat pixel_format_to) {
+ const u32 bw1 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_from);
+ const u32 bw2 = VideoCore::Surface::GetDefaultBlockWidth(pixel_format_to);
+ return (width * bw2 + bw1 - 1) / bw1;
+ }
+
+ /// Converts a height from one pixel format into another. This helps represent the
+ /// equivalent value between compressed and non-compressed textures.
+ static u32 ConvertHeight(u32 height, VideoCore::Surface::PixelFormat pixel_format_from,
+ VideoCore::Surface::PixelFormat pixel_format_to) {
+ const u32 bh1 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_from);
+ const u32 bh2 = VideoCore::Surface::GetDefaultBlockHeight(pixel_format_to);
+ return (height * bh2 + bh1 - 1) / bh1;
+ }
+
+ // Finds the maximum possible width between two 2D layers of different formats
+ static u32 IntersectWidth(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+ const u32 src_level, const u32 dst_level) {
+ const u32 bw1 = src_params.GetDefaultBlockWidth();
+ const u32 bw2 = dst_params.GetDefaultBlockWidth();
+ const u32 t_src_width = (src_params.GetMipWidth(src_level) * bw2 + bw1 - 1) / bw1;
+ const u32 t_dst_width = (dst_params.GetMipWidth(dst_level) * bw1 + bw2 - 1) / bw2;
+ return std::min(t_src_width, t_dst_width);
+ }
+
+ // Finds the maximum possible height between two 2D layers of different formats
+ static u32 IntersectHeight(const SurfaceParams& src_params, const SurfaceParams& dst_params,
+ const u32 src_level, const u32 dst_level) {
+ const u32 bh1 = src_params.GetDefaultBlockHeight();
+ const u32 bh2 = dst_params.GetDefaultBlockHeight();
+ const u32 t_src_height = (src_params.GetMipHeight(src_level) * bh2 + bh1 - 1) / bh1;
+ const u32 t_dst_height = (dst_params.GetMipHeight(dst_level) * bh1 + bh2 - 1) / bh2;
+ return std::min(t_src_height, t_dst_height);
+ }
+
+ bool is_tiled;
+ bool srgb_conversion;
+ bool is_layered;
+ u32 block_width;
+ u32 block_height;
+ u32 block_depth;
+ u32 tile_width_spacing;
+ u32 width;
+ u32 height;
+ u32 depth;
+ u32 pitch;
+ u32 num_levels;
+ u32 emulated_levels;
+ VideoCore::Surface::PixelFormat pixel_format;
+ VideoCore::Surface::ComponentType component_type;
+ VideoCore::Surface::SurfaceType type;
+ VideoCore::Surface::SurfaceTarget target;
+
+private:
+ /// Returns the size of a given mipmap level inside a layer.
+ std::size_t GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool uncompressed) const;
+
+ /// Returns the size of all mipmap levels and aligns as needed.
+ std::size_t GetInnerMemorySize(bool as_host_size, bool layer_only, bool uncompressed) const {
+ return GetLayerSize(as_host_size, uncompressed) * (layer_only ? 1U : depth);
+ }
+
+ /// Returns the size of a layer
+ std::size_t GetLayerSize(bool as_host_size, bool uncompressed) const;
+
+ std::size_t GetNumLayers() const {
+ return is_layered ? depth : 1;
+ }
+
+ /// Returns true if these parameters are from a layered surface.
+ bool IsLayered() const;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+template <>
+struct hash<VideoCommon::SurfaceParams> {
+ std::size_t operator()(const VideoCommon::SurfaceParams& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
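ConvertWidth and ConvertHeight above rescale a dimension between formats with different compression block sizes, rounding up; IntersectWidth and IntersectHeight then take the minimum of the two directions to get the largest region both layers can exchange. A small numeric check of the width formula, assuming a 4-wide-block compressed format on one side and an uncompressed (1x1 block) format on the other; the helper below only mirrors the arithmetic and is not part of the patch:

#include <cassert>
#include <cstdint>

// Mirrors SurfaceParams::ConvertWidth: rescale between block widths, rounding up.
constexpr std::uint32_t ConvertWidth(std::uint32_t width, std::uint32_t bw_from,
                                     std::uint32_t bw_to) {
    return (width * bw_to + bw_from - 1) / bw_from;
}

int main() {
    // 60 texels of a 4-wide-block format map to 15 block columns of a 1-wide format.
    assert(ConvertWidth(60, 4, 1) == 15);
    // Going the other way, 15 block columns expand back to 60 texels.
    assert(ConvertWidth(15, 1, 4) == 60);
    // A width that is not block-aligned rounds up: 61 texels still need 16 blocks.
    assert(ConvertWidth(61, 4, 1) == 16);
    return 0;
}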
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
new file mode 100644
index 000000000..467696a4c
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -0,0 +1,23 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <tuple>
+
+#include "common/common_types.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace VideoCommon {
+
+std::size_t ViewParams::Hash() const {
+ return static_cast<std::size_t>(base_layer) ^ static_cast<std::size_t>(num_layers << 16) ^
+ (static_cast<std::size_t>(base_level) << 24) ^
+ (static_cast<std::size_t>(num_levels) << 32) ^ (static_cast<std::size_t>(target) << 36);
+}
+
+bool ViewParams::operator==(const ViewParams& rhs) const {
+ return std::tie(base_layer, num_layers, base_level, num_levels, target) ==
+ std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
new file mode 100644
index 000000000..04ca5639b
--- /dev/null
+++ b/src/video_core/texture_cache/surface_view.h
@@ -0,0 +1,67 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <functional>
+
+#include "common/common_types.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/surface_params.h"
+
+namespace VideoCommon {
+
+struct ViewParams {
+ ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer, u32 num_layers,
+ u32 base_level, u32 num_levels)
+ : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level},
+ num_levels{num_levels} {}
+
+ std::size_t Hash() const;
+
+ bool operator==(const ViewParams& rhs) const;
+
+ VideoCore::Surface::SurfaceTarget target{};
+ u32 base_layer{};
+ u32 num_layers{};
+ u32 base_level{};
+ u32 num_levels{};
+
+ bool IsLayered() const {
+ switch (target) {
+ case VideoCore::Surface::SurfaceTarget::Texture1DArray:
+ case VideoCore::Surface::SurfaceTarget::Texture2DArray:
+ case VideoCore::Surface::SurfaceTarget::TextureCubemap:
+ case VideoCore::Surface::SurfaceTarget::TextureCubeArray:
+ return true;
+ default:
+ return false;
+ }
+ }
+};
+
+class ViewBase {
+public:
+ ViewBase(const ViewParams& params) : params{params} {}
+
+ const ViewParams& GetViewParams() const {
+ return params;
+ }
+
+protected:
+ ViewParams params;
+};
+
+} // namespace VideoCommon
+
+namespace std {
+
+template <>
+struct hash<VideoCommon::ViewParams> {
+ std::size_t operator()(const VideoCommon::ViewParams& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
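The std::hash specialization above is what lets ViewParams (like SurfaceParams) key standard unordered containers directly, which is how SurfaceBase keeps its per-surface view table. A minimal usage sketch with a hypothetical string payload standing in for a backend view object:

#include <string>
#include <unordered_map>

#include "video_core/texture_cache/surface_view.h"

using VideoCommon::ViewParams;
using VideoCore::Surface::SurfaceTarget;

int main() {
    // Hypothetical payload standing in for an API-specific view.
    std::unordered_map<ViewParams, std::string> views;

    const ViewParams key{SurfaceTarget::Texture2DArray, /*base_layer=*/0, /*num_layers=*/6,
                         /*base_level=*/0, /*num_levels=*/1};
    views.try_emplace(key, "first six layers, base level");

    // Lookups hash through std::hash<ViewParams> and compare with operator==.
    return views.count(key) == 1 ? 0 : 1;
}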
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
new file mode 100644
index 000000000..c9e72531a
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -0,0 +1,814 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <tuple>
+#include <unordered_map>
+#include <vector>
+
+#include <boost/icl/interval_map.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/math_util.h"
+#include "core/core.h"
+#include "core/memory.h"
+#include "core/settings.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+#include "video_core/surface.h"
+#include "video_core/texture_cache/copy_params.h"
+#include "video_core/texture_cache/surface_base.h"
+#include "video_core/texture_cache/surface_params.h"
+#include "video_core/texture_cache/surface_view.h"
+
+namespace Tegra::Texture {
+struct FullTextureInfo;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace VideoCommon {
+
+using VideoCore::Surface::PixelFormat;
+
+using VideoCore::Surface::SurfaceTarget;
+using RenderTargetConfig = Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig;
+
+template <typename TSurface, typename TView>
+class TextureCache {
+ using IntervalMap = boost::icl::interval_map<CacheAddr, std::set<TSurface>>;
+ using IntervalType = typename IntervalMap::interval_type;
+
+public:
+ void InvalidateRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ for (const auto& surface : GetSurfacesInRegion(addr, size)) {
+ Unregister(surface);
+ }
+ }
+
+ /***
+ * `Guard` guarantees that render targets don't unregister themselves if they
+ * collide. Protection is currently only done on 3D slices.
+ ***/
+ void GuardRenderTargets(bool new_guard) {
+ guard_render_targets = new_guard;
+ }
+
+ void GuardSamplers(bool new_guard) {
+ guard_samplers = new_guard;
+ }
+
+ void FlushRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ auto surfaces = GetSurfacesInRegion(addr, size);
+ if (surfaces.empty()) {
+ return;
+ }
+ std::sort(surfaces.begin(), surfaces.end(), [](const TSurface& a, const TSurface& b) {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (const auto& surface : surfaces) {
+ FlushSurface(surface);
+ }
+ }
+
+ TView GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
+ const VideoCommon::Shader::Sampler& entry) {
+ std::lock_guard lock{mutex};
+ const auto gpu_addr{config.tic.Address()};
+ if (!gpu_addr) {
+ return {};
+ }
+ const auto params{SurfaceParams::CreateForTexture(system, config, entry)};
+ const auto [surface, view] = GetSurface(gpu_addr, params, true, false);
+ if (guard_samplers) {
+ sampled_textures.push_back(surface);
+ }
+ return view;
+ }
+
+ bool TextureBarrier() {
+ const bool any_rt =
+ std::any_of(sampled_textures.begin(), sampled_textures.end(),
+ [](const auto& surface) { return surface->IsRenderTarget(); });
+ sampled_textures.clear();
+ return any_rt;
+ }
+
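+ /// Returns the view of the currently bound depth buffer, reusing the cached view when the
+ /// zeta buffer is not flagged as dirty.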
+ TView GetDepthBufferSurface(bool preserve_contents) {
+ std::lock_guard lock{mutex};
+ auto& maxwell3d = system.GPU().Maxwell3D();
+
+ if (!maxwell3d.dirty_flags.zeta_buffer) {
+ return depth_buffer.view;
+ }
+ maxwell3d.dirty_flags.zeta_buffer = false;
+
+ const auto& regs{maxwell3d.regs};
+ const auto gpu_addr{regs.zeta.Address()};
+ if (!gpu_addr || !regs.zeta_enable) {
+ SetEmptyDepthBuffer();
+ return {};
+ }
+ const auto depth_params{SurfaceParams::CreateForDepthBuffer(
+ system, regs.zeta_width, regs.zeta_height, regs.zeta.format,
+ regs.zeta.memory_layout.block_width, regs.zeta.memory_layout.block_height,
+ regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)};
+ auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true);
+ if (depth_buffer.target)
+ depth_buffer.target->MarkAsRenderTarget(false);
+ depth_buffer.target = surface_view.first;
+ depth_buffer.view = surface_view.second;
+ if (depth_buffer.target)
+ depth_buffer.target->MarkAsRenderTarget(true);
+ return surface_view.second;
+ }
+
+ TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
+ std::lock_guard lock{mutex};
+ ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
+ auto& maxwell3d = system.GPU().Maxwell3D();
+ if (!maxwell3d.dirty_flags.color_buffer[index]) {
+ return render_targets[index].view;
+ }
+ maxwell3d.dirty_flags.color_buffer.reset(index);
+
+ const auto& regs{maxwell3d.regs};
+ if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 ||
+ regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
+ SetEmptyColorBuffer(index);
+ return {};
+ }
+
+ const auto& config{regs.rt[index]};
+ const auto gpu_addr{config.Address()};
+ if (!gpu_addr) {
+ SetEmptyColorBuffer(index);
+ return {};
+ }
+
+ auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+ preserve_contents, true);
+ if (render_targets[index].target)
+ render_targets[index].target->MarkAsRenderTarget(false);
+ render_targets[index].target = surface_view.first;
+ render_targets[index].view = surface_view.second;
+ if (render_targets[index].target)
+ render_targets[index].target->MarkAsRenderTarget(true);
+ return surface_view.second;
+ }
+
+ void MarkColorBufferInUse(std::size_t index) {
+ if (auto& render_target = render_targets[index].target) {
+ render_target->MarkAsModified(true, Tick());
+ }
+ }
+
+ void MarkDepthBufferInUse() {
+ if (depth_buffer.target) {
+ depth_buffer.target->MarkAsModified(true, Tick());
+ }
+ }
+
+ void SetEmptyDepthBuffer() {
+ if (depth_buffer.target == nullptr) {
+ return;
+ }
+ depth_buffer.target->MarkAsRenderTarget(false);
+ depth_buffer.target = nullptr;
+ depth_buffer.view = nullptr;
+ }
+
+ void SetEmptyColorBuffer(std::size_t index) {
+ if (render_targets[index].target == nullptr) {
+ return;
+ }
+ render_targets[index].target->MarkAsRenderTarget(false);
+ render_targets[index].target = nullptr;
+ render_targets[index].view = nullptr;
+ }
+
+ void DoFermiCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
+ const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
+ const Tegra::Engines::Fermi2D::Config& copy_config) {
+ std::lock_guard lock{mutex};
+ std::pair<TSurface, TView> dst_surface = GetFermiSurface(dst_config);
+ std::pair<TSurface, TView> src_surface = GetFermiSurface(src_config);
+ ImageBlit(src_surface.second, dst_surface.second, copy_config);
+ dst_surface.first->MarkAsModified(true, Tick());
+ }
+
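+ /// Tries to find a registered surface whose base address matches the given host pointer.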
+ TSurface TryFindFramebufferSurface(const u8* host_ptr) {
+ const CacheAddr cache_addr = ToCacheAddr(host_ptr);
+ if (!cache_addr) {
+ return nullptr;
+ }
+ const CacheAddr page = cache_addr >> registry_page_bits;
+ std::vector<TSurface>& list = registry[page];
+ for (auto& surface : list) {
+ if (surface->GetCacheAddr() == cache_addr) {
+ return surface;
+ }
+ }
+ return nullptr;
+ }
+
+ u64 Tick() {
+ return ++ticks;
+ }
+
+protected:
+ TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
+ : system{system}, rasterizer{rasterizer} {
+ for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+ SetEmptyColorBuffer(i);
+ }
+
+ SetEmptyDepthBuffer();
+ staging_cache.SetSize(2);
+
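+ // Pair depth formats with color formats of the same bit width so they can alias each other.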
+ const auto make_siblings = [this](PixelFormat a, PixelFormat b) {
+ siblings_table[static_cast<std::size_t>(a)] = b;
+ siblings_table[static_cast<std::size_t>(b)] = a;
+ };
+ std::fill(siblings_table.begin(), siblings_table.end(), PixelFormat::Invalid);
+ make_siblings(PixelFormat::Z16, PixelFormat::R16U);
+ make_siblings(PixelFormat::Z32F, PixelFormat::R32F);
+ make_siblings(PixelFormat::Z32FS8, PixelFormat::RG32F);
+
+ sampled_textures.reserve(64);
+ }
+
+ ~TextureCache() = default;
+
+ virtual TSurface CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) = 0;
+
+ virtual void ImageCopy(TSurface& src_surface, TSurface& dst_surface,
+ const CopyParams& copy_params) = 0;
+
+ virtual void ImageBlit(TView& src_view, TView& dst_view,
+ const Tegra::Engines::Fermi2D::Config& copy_config) = 0;
+
+ // Depending on the backend, a buffer copy can be slow as it means deoptimizing the texture
+ // and reading it from a separate buffer.
+ virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0;
+
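+ /// Registers a surface: resolves its cache and CPU addresses, inserts it into the inner
+ /// cache and updates the rasterizer's cached page counts.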
+ void Register(TSurface surface) {
+ const GPUVAddr gpu_addr = surface->GetGpuAddr();
+ const CacheAddr cache_ptr = ToCacheAddr(system.GPU().MemoryManager().GetPointer(gpu_addr));
+ const std::size_t size = surface->GetSizeInBytes();
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
+ if (!cache_ptr || !cpu_addr) {
+ LOG_CRITICAL(HW_GPU, "Failed to register surface with unmapped gpu_address 0x{:016x}",
+ gpu_addr);
+ return;
+ }
+ const bool continuous = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
+ surface->MarkAsContinuous(continuous);
+ surface->SetCacheAddr(cache_ptr);
+ surface->SetCpuAddr(*cpu_addr);
+ RegisterInnerCache(surface);
+ surface->MarkAsRegistered(true);
+ rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+ }
+
+ void Unregister(TSurface surface) {
+ if (guard_render_targets && surface->IsProtected()) {
+ return;
+ }
+ const GPUVAddr gpu_addr = surface->GetGpuAddr();
+ const CacheAddr cache_ptr = surface->GetCacheAddr();
+ const std::size_t size = surface->GetSizeInBytes();
+ const VAddr cpu_addr = surface->GetCpuAddr();
+ rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
+ UnregisterInnerCache(surface);
+ surface->MarkAsRegistered(false);
+ ReserveSurface(surface->GetSurfaceParams(), surface);
+ }
+
+ TSurface GetUncachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params) {
+ if (const auto surface = TryGetReservedSurface(params); surface) {
+ surface->SetGpuAddr(gpu_addr);
+ return surface;
+ }
+ // No reserved surface available, create a new one and reserve it
+ auto new_surface{CreateSurface(gpu_addr, params)};
+ return new_surface;
+ }
+
+ std::pair<TSurface, TView> GetFermiSurface(
+ const Tegra::Engines::Fermi2D::Regs::Surface& config) {
+ SurfaceParams params = SurfaceParams::CreateForFermiCopySurface(config);
+ const GPUVAddr gpu_addr = config.Address();
+ return GetSurface(gpu_addr, params, true, false);
+ }
+
+ Core::System& system;
+
+private:
+ enum class RecycleStrategy : u32 {
+ Ignore = 0,
+ Flush = 1,
+ BufferCopy = 3,
+ };
+
+ /**
+ * `PickStrategy` takes care of selecting a proper strategy to deal with a texture recycle.
+ * @param overlaps, the overlapping surfaces registered in the cache.
+ * @param params, the parameters of the new surface.
+ * @param gpu_addr, the starting address of the new surface.
+ * @param untopological, tells the recycler that the texture has no way to match the overlaps
+ * due to topological reasons.
+ **/
+ RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
+ const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
+ if (Settings::values.use_accurate_gpu_emulation) {
+ return RecycleStrategy::Flush;
+ }
+ // 3D Textures decision
+ if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) {
+ return RecycleStrategy::Flush;
+ }
+ for (auto s : overlaps) {
+ const auto& s_params = s->GetSurfaceParams();
+ if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) {
+ return RecycleStrategy::Flush;
+ }
+ }
+ // Untopological decision
+ if (untopological == MatchTopologyResult::CompressUnmatch) {
+ return RecycleStrategy::Flush;
+ }
+ if (untopological == MatchTopologyResult::FullMatch && !params.is_tiled) {
+ return RecycleStrategy::Flush;
+ }
+ return RecycleStrategy::Ignore;
+ }
+
+ /**
+ * `RecycleSurface` is the method we use to decide what to do with textures we can't resolve
+ * in the cache. It has 2 implemented strategies: Ignore and Flush. Ignore just unregisters
+ * all the overlaps and loads the new texture. Flush flushes all the overlaps into memory and
+ * loads the new surface from that data.
+ * @param overlaps, the overlapping surfaces registered in the cache.
+ * @param params, the parameters of the new surface.
+ * @param gpu_addr, the starting address of the new surface.
+ * @param preserve_contents, tells if the new surface should be loaded from memory or left blank.
+ * @param untopological, tells the recycler that the texture has no way to match the overlaps
+ * due to topological reasons.
+ **/
+ std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
+ const SurfaceParams& params, const GPUVAddr gpu_addr,
+ const bool preserve_contents,
+ const MatchTopologyResult untopological) {
+ const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation;
+ for (auto& surface : overlaps) {
+ Unregister(surface);
+ }
+ switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
+ case RecycleStrategy::Ignore: {
+ return InitializeSurface(gpu_addr, params, do_load);
+ }
+ case RecycleStrategy::Flush: {
+ std::sort(overlaps.begin(), overlaps.end(),
+ [](const TSurface& a, const TSurface& b) -> bool {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (auto& surface : overlaps) {
+ FlushSurface(surface);
+ }
+ return InitializeSurface(gpu_addr, params, preserve_contents);
+ }
+ case RecycleStrategy::BufferCopy: {
+ auto new_surface = GetUncachedSurface(gpu_addr, params);
+ BufferCopy(overlaps[0], new_surface);
+ return {new_surface, new_surface->GetMainView()};
+ }
+ default: {
+ UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
+ return InitializeSurface(gpu_addr, params, do_load);
+ }
+ }
+ }
+
+ /**
+ * `RebuildSurface` takes a single surface and recreates it into another that may differ in
+ * format, target or width alignment.
+ * @param current_surface, the registered surface in the cache which we want to convert.
+ * @param params, the new surface params which we'll use to recreate the surface.
+ **/
+ std::pair<TSurface, TView> RebuildSurface(TSurface current_surface, const SurfaceParams& params,
+ bool is_render) {
+ const auto gpu_addr = current_surface->GetGpuAddr();
+ const auto& cr_params = current_surface->GetSurfaceParams();
+ TSurface new_surface;
+ if (cr_params.pixel_format != params.pixel_format && !is_render &&
+ GetSiblingFormat(cr_params.pixel_format) == params.pixel_format) {
+ SurfaceParams new_params = params;
+ new_params.pixel_format = cr_params.pixel_format;
+ new_params.component_type = cr_params.component_type;
+ new_params.type = cr_params.type;
+ new_surface = GetUncachedSurface(gpu_addr, new_params);
+ } else {
+ new_surface = GetUncachedSurface(gpu_addr, params);
+ }
+ const auto& final_params = new_surface->GetSurfaceParams();
+ if (cr_params.type != final_params.type ||
+ (cr_params.component_type != final_params.component_type)) {
+ BufferCopy(current_surface, new_surface);
+ } else {
+ std::vector<CopyParams> bricks = current_surface->BreakDown(final_params);
+ for (auto& brick : bricks) {
+ ImageCopy(current_surface, new_surface, brick);
+ }
+ }
+ Unregister(current_surface);
+ Register(new_surface);
+ new_surface->MarkAsModified(current_surface->IsModified(), Tick());
+ return {new_surface, new_surface->GetMainView()};
+ }
+
+ /**
+ * `ManageStructuralMatch` takes a single surface and checks it against the new surface's
+ * params. If it's an exact match, we return the main view of the registered surface. If its
+ * formats don't match, we rebuild the surface; we call this last case a `Mirage`. If formats
+ * match but the targets don't, we create an overview view of the registered surface.
+ * @param current_surface, the registered surface in the cache which we want to convert.
+ * @param params, the new surface params which we want to check.
+ **/
+ std::pair<TSurface, TView> ManageStructuralMatch(TSurface current_surface,
+ const SurfaceParams& params, bool is_render) {
+ const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+ const bool matches_target = current_surface->MatchTarget(params.target);
+ const auto match_check = [&]() -> std::pair<TSurface, TView> {
+ if (matches_target) {
+ return {current_surface, current_surface->GetMainView()};
+ }
+ return {current_surface, current_surface->EmplaceOverview(params)};
+ };
+ if (!is_mirage) {
+ return match_check();
+ }
+ if (!is_render && GetSiblingFormat(current_surface->GetFormat()) == params.pixel_format) {
+ return match_check();
+ }
+ return RebuildSurface(current_surface, params, is_render);
+ }
+
+ /**
+ * `TryReconstructSurface` is unlike `RebuildSurface`: there we know the registered surface
+ * matches the candidate in some way, but here we have no guarantees. We try to see if the
+ * overlaps are sublayers/mipmaps of the new surface; if they all match, we end up recreating
+ * a surface for them, else we return nothing.
+ * @param overlaps, the overlapping surfaces registered in the cache.
+ * @param params, the parameters of the new surface.
+ * @param gpu_addr, the starting address of the new surface.
+ **/
+ std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps,
+ const SurfaceParams& params,
+ const GPUVAddr gpu_addr) {
+ if (params.target == SurfaceTarget::Texture3D) {
+ return {};
+ }
+ bool modified = false;
+ TSurface new_surface = GetUncachedSurface(gpu_addr, params);
+ u32 passed_tests = 0;
+ for (auto& surface : overlaps) {
+ const SurfaceParams& src_params = surface->GetSurfaceParams();
+ if (src_params.is_layered || src_params.num_levels > 1) {
+ // We send these cases to the recycler as they are more complex to handle
+ return {};
+ }
+ const std::size_t candidate_size = surface->GetSizeInBytes();
+ auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
+ if (!mipmap_layer) {
+ continue;
+ }
+ const auto [layer, mipmap] = *mipmap_layer;
+ if (new_surface->GetMipmapSize(mipmap) != candidate_size) {
+ continue;
+ }
+ modified |= surface->IsModified();
+ // Now we have all the data set up
+ const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap);
+ const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap);
+ const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, width, height, 1);
+ passed_tests++;
+ ImageCopy(surface, new_surface, copy_params);
+ }
+ if (passed_tests == 0) {
+ return {};
+ // In accurate GPU emulation all tests should pass, else we recycle
+ } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
+ return {};
+ }
+ for (auto surface : overlaps) {
+ Unregister(surface);
+ }
+ new_surface->MarkAsModified(modified, Tick());
+ Register(new_surface);
+ return {{new_surface, new_surface->GetMainView()}};
+ }
+
+ /**
+ * `GetSurface` gets the starting address and parameters of a candidate surface and tries
+ * to find a matching surface within the cache. This is done in 3 big steps. The first is to
+ * check the 1st Level Cache in order to find an exact match; if we fail, we move to step 2.
+ * Step 2 is checking if there are any overlaps at all; if there are none, we just load the
+ * texture from memory, else we move to step 3. Step 3 consists of figuring out the
+ * relationship between the candidate texture and the overlaps. We divide the scenarios
+ * depending on whether there's 1 or many overlaps. If there are many, we just try to
+ * reconstruct a new surface out of them based on the candidate's parameters; if we fail, we
+ * recycle. When there's only 1 overlap, we have to check if the candidate is a view
+ * (layer/mipmap) of the overlap or if the registered surface is a mipmap/layer of the
+ * candidate. In this last case we reconstruct a new surface.
+ * @param gpu_addr, the starting address of the candidate surface.
+ * @param params, the parameters of the candidate surface.
+ * @param preserve_contents, tells if the new surface should be loaded from memory or left blank.
+ **/
+ std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents, bool is_render) {
+ const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
+ const auto cache_addr{ToCacheAddr(host_ptr)};
+
+ // Step 0: guarantee a valid surface
+ if (!cache_addr) {
+ // Return a null surface if it's invalid
+ SurfaceParams new_params = params;
+ new_params.width = 1;
+ new_params.height = 1;
+ new_params.depth = 1;
+ new_params.block_height = 0;
+ new_params.block_depth = 0;
+ return InitializeSurface(gpu_addr, new_params, false);
+ }
+
+ // Step 1
+ // Check Level 1 Cache for a fast structural match. If candidate surface
+ // matches at a certain level, we are pretty much done.
+ if (const auto iter = l1_cache.find(cache_addr); iter != l1_cache.end()) {
+ TSurface& current_surface = iter->second;
+ const auto topological_result = current_surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ std::vector<TSurface> overlaps{current_surface};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ const auto struct_result = current_surface->MatchesStructure(params);
+ if (struct_result != MatchStructureResult::None &&
+ (params.target != SurfaceTarget::Texture3D ||
+ current_surface->MatchTarget(params.target))) {
+ if (struct_result == MatchStructureResult::FullMatch) {
+ return ManageStructuralMatch(current_surface, params, is_render);
+ } else {
+ return RebuildSurface(current_surface, params, is_render);
+ }
+ }
+ }
+
+ // Step 2
+ // Obtain all possible overlaps in the memory region
+ const std::size_t candidate_size = params.GetGuestSizeInBytes();
+ auto overlaps{GetSurfacesInRegion(cache_addr, candidate_size)};
+
+ // If none are found, we are done; we just create the surface and load it.
+ if (overlaps.empty()) {
+ return InitializeSurface(gpu_addr, params, preserve_contents);
+ }
+
+ // Step 3
+ // Now we need to figure out the relationship between the texture and its overlaps.
+ // We do a topological test to ensure we can find some relationship. If it fails,
+ // immediately recycle the texture.
+ for (const auto& surface : overlaps) {
+ const auto topological_result = surface->MatchesTopology(params);
+ if (topological_result != MatchTopologyResult::FullMatch) {
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ topological_result);
+ }
+ }
+
+ // Split cases between 1 overlap or many.
+ if (overlaps.size() == 1) {
+ TSurface current_surface = overlaps[0];
+ // First check if the surface is within the overlap. If not, it means one of
+ // two things: either the candidate surface is a supertexture of the overlap,
+ // or they don't match in any known way.
+ if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) {
+ if (current_surface->GetGpuAddr() == gpu_addr) {
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ // Now we check if the candidate is a mipmap/layer of the overlap
+ std::optional<TView> view =
+ current_surface->EmplaceView(params, gpu_addr, candidate_size);
+ if (view) {
+ const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
+ if (is_mirage) {
+ // On a mirage view, we need to recreate the surface under this new view
+ // and then obtain a view again.
+ SurfaceParams new_params = current_surface->GetSurfaceParams();
+ const u32 wh = SurfaceParams::ConvertWidth(
+ new_params.width, new_params.pixel_format, params.pixel_format);
+ const u32 hh = SurfaceParams::ConvertHeight(
+ new_params.height, new_params.pixel_format, params.pixel_format);
+ new_params.width = wh;
+ new_params.height = hh;
+ new_params.pixel_format = params.pixel_format;
+ std::pair<TSurface, TView> pair =
+ RebuildSurface(current_surface, new_params, is_render);
+ std::optional<TView> mirage_view =
+ pair.first->EmplaceView(params, gpu_addr, candidate_size);
+ if (mirage_view)
+ return {pair.first, *mirage_view};
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ return {current_surface, *view};
+ }
+ // The next case is unsafe, so if we are in accurate GPU emulation, just skip it
+ if (Settings::values.use_accurate_gpu_emulation) {
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+ // This is the case the texture is a part of the parent.
+ if (current_surface->MatchesSubTexture(params, gpu_addr)) {
+ return RebuildSurface(current_surface, params, is_render);
+ }
+ } else {
+ // If there are many overlaps, odds are they are subtextures of the candidate
+ // surface. We try to construct a new surface based on the candidate parameters,
+ // using the overlaps. If a single overlap fails, this will fail.
+ std::optional<std::pair<TSurface, TView>> view =
+ TryReconstructSurface(overlaps, params, gpu_addr);
+ if (view) {
+ return *view;
+ }
+ }
+ // We failed all the tests, recycle the overlaps into a new texture.
+ return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+ MatchTopologyResult::FullMatch);
+ }
+
+ std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
+ bool preserve_contents) {
+ auto new_surface{GetUncachedSurface(gpu_addr, params)};
+ Register(new_surface);
+ if (preserve_contents) {
+ LoadSurface(new_surface);
+ }
+ return {new_surface, new_surface->GetMainView()};
+ }
+
+ void LoadSurface(const TSurface& surface) {
+ staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+ surface->LoadBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->UploadTexture(staging_cache.GetBuffer(0));
+ surface->MarkAsModified(false, Tick());
+ }
+
+ void FlushSurface(const TSurface& surface) {
+ if (!surface->IsModified()) {
+ return;
+ }
+ staging_cache.GetBuffer(0).resize(surface->GetHostSizeInBytes());
+ surface->DownloadTexture(staging_cache.GetBuffer(0));
+ surface->FlushBuffer(system.GPU().MemoryManager(), staging_cache);
+ surface->MarkAsModified(false, Tick());
+ }
+
+ void RegisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache[cache_addr] = surface;
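+ // Insert the surface into every registry bucket (1 MiB pages) that it spans.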
+ while (start <= end) {
+ registry[start].push_back(surface);
+ start++;
+ }
+ }
+
+ void UnregisterInnerCache(TSurface& surface) {
+ const CacheAddr cache_addr = surface->GetCacheAddr();
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (surface->GetCacheAddrEnd() - 1) >> registry_page_bits;
+ l1_cache.erase(cache_addr);
+ while (start <= end) {
+ auto& reg{registry[start]};
+ reg.erase(std::find(reg.begin(), reg.end(), surface));
+ start++;
+ }
+ }
+
+ std::vector<TSurface> GetSurfacesInRegion(const CacheAddr cache_addr, const std::size_t size) {
+ if (size == 0) {
+ return {};
+ }
+ const CacheAddr cache_addr_end = cache_addr + size;
+ CacheAddr start = cache_addr >> registry_page_bits;
+ const CacheAddr end = (cache_addr_end - 1) >> registry_page_bits;
+ std::vector<TSurface> surfaces;
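+ // Mark surfaces as picked while collecting them so a surface spanning several registry
+ // buckets is only returned once.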
+ while (start <= end) {
+ std::vector<TSurface>& list = registry[start];
+ for (auto& surface : list) {
+ if (!surface->IsPicked() && surface->Overlaps(cache_addr, cache_addr_end)) {
+ surface->MarkAsPicked(true);
+ surfaces.push_back(surface);
+ }
+ }
+ start++;
+ }
+ for (auto& surface : surfaces) {
+ surface->MarkAsPicked(false);
+ }
+ return surfaces;
+ }
+
+ void ReserveSurface(const SurfaceParams& params, TSurface surface) {
+ surface_reserve[params].push_back(std::move(surface));
+ }
+
+ TSurface TryGetReservedSurface(const SurfaceParams& params) {
+ auto search{surface_reserve.find(params)};
+ if (search == surface_reserve.end()) {
+ return {};
+ }
+ for (auto& surface : search->second) {
+ if (!surface->IsRegistered()) {
+ return surface;
+ }
+ }
+ return {};
+ }
+
+ constexpr PixelFormat GetSiblingFormat(PixelFormat format) const {
+ return siblings_table[static_cast<std::size_t>(format)];
+ }
+
+ struct FramebufferTargetInfo {
+ TSurface target;
+ TView view;
+ };
+
+ VideoCore::RasterizerInterface& rasterizer;
+
+ u64 ticks{};
+
+ // Guards the cache for protection conflicts.
+ bool guard_render_targets{};
+ bool guard_samplers{};
+
+ // The siblings table is for formats that can be interchanged with one another
+ // without causing issues. This is only valid when a conflict occurs on a
+ // non-rendering use.
+ std::array<PixelFormat, static_cast<std::size_t>(PixelFormat::Max)> siblings_table;
+
+ // The internal cache is different for the texture cache: it's based on buckets
+ // of 1 MiB. This fits the purpose of this cache better, as textures are normally
+ // large in size.
+ static constexpr u64 registry_page_bits{20};
+ static constexpr u64 registry_page_size{1 << registry_page_bits};
+ std::unordered_map<CacheAddr, std::vector<TSurface>> registry;
+
+ // The L1 cache is used for fast texture lookup before checking the overlaps.
+ // This avoids calculating sizes and other things.
+ std::unordered_map<CacheAddr, TSurface> l1_cache;
+
+ /// The surface reserve is a "backup" cache; this is where we put unique surfaces that have
+ /// previously been used. This is to prevent surfaces from being constantly created and
+ /// destroyed when used with different surface parameters.
+ std::unordered_map<SurfaceParams, std::vector<TSurface>> surface_reserve;
+ std::array<FramebufferTargetInfo, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
+ render_targets;
+ FramebufferTargetInfo depth_buffer;
+
+ std::vector<TSurface> sampled_textures;
+
+ StagingCache staging_cache;
+ std::recursive_mutex mutex;
+};
+
+} // namespace VideoCommon
diff --git a/src/video_core/textures/convert.cpp b/src/video_core/textures/convert.cpp
index 82050bd51..f3efa7eb0 100644
--- a/src/video_core/textures/convert.cpp
+++ b/src/video_core/textures/convert.cpp
@@ -62,19 +62,19 @@ static void ConvertZ24S8ToS8Z24(u8* data, u32 width, u32 height) {
SwapS8Z24ToZ24S8<true>(data, width, height);
}
-void ConvertFromGuestToHost(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
- bool convert_astc, bool convert_s8z24) {
+void ConvertFromGuestToHost(u8* in_data, u8* out_data, PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24) {
if (convert_astc && IsPixelFormatASTC(pixel_format)) {
// Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
u32 block_width{};
u32 block_height{};
std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
- const std::vector<u8> rgba8_data =
- Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
- std::copy(rgba8_data.begin(), rgba8_data.end(), data);
+ const std::vector<u8> rgba8_data = Tegra::Texture::ASTC::Decompress(
+ in_data, width, height, depth, block_width, block_height);
+ std::copy(rgba8_data.begin(), rgba8_data.end(), out_data);
} else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
- Tegra::Texture::ConvertS8Z24ToZ24S8(data, width, height);
+ Tegra::Texture::ConvertS8Z24ToZ24S8(in_data, width, height);
}
}
@@ -90,4 +90,4 @@ void ConvertFromHostToGuest(u8* data, PixelFormat pixel_format, u32 width, u32 h
}
}
-} // namespace Tegra::Texture \ No newline at end of file
+} // namespace Tegra::Texture
diff --git a/src/video_core/textures/convert.h b/src/video_core/textures/convert.h
index 12542e71c..d5d6c77bb 100644
--- a/src/video_core/textures/convert.h
+++ b/src/video_core/textures/convert.h
@@ -12,10 +12,11 @@ enum class PixelFormat;
namespace Tegra::Texture {
-void ConvertFromGuestToHost(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
- u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+void ConvertFromGuestToHost(u8* in_data, u8* out_data, VideoCore::Surface::PixelFormat pixel_format,
+ u32 width, u32 height, u32 depth, bool convert_astc,
+ bool convert_s8z24);
void ConvertFromHostToGuest(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
-} // namespace Tegra::Texture \ No newline at end of file
+} // namespace Tegra::Texture
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 217805386..7e8295944 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -36,10 +36,16 @@ struct alignas(64) SwizzleTable {
std::array<std::array<u16, M>, N> values{};
};
-constexpr u32 gob_size_x = 64;
-constexpr u32 gob_size_y = 8;
-constexpr u32 gob_size_z = 1;
-constexpr u32 gob_size = gob_size_x * gob_size_y * gob_size_z;
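+// GOB dimensions expressed as log2 shifts: 64 bytes wide, 8 rows tall, 1 layer deep (512 bytes).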
+constexpr u32 gob_size_x_shift = 6;
+constexpr u32 gob_size_y_shift = 3;
+constexpr u32 gob_size_z_shift = 0;
+constexpr u32 gob_size_shift = gob_size_x_shift + gob_size_y_shift + gob_size_z_shift;
+
+constexpr u32 gob_size_x = 1U << gob_size_x_shift;
+constexpr u32 gob_size_y = 1U << gob_size_y_shift;
+constexpr u32 gob_size_z = 1U << gob_size_z_shift;
+constexpr u32 gob_size = 1U << gob_size_shift;
+
constexpr u32 fast_swizzle_align = 16;
constexpr auto legacy_swizzle_table = SwizzleTable<gob_size_y, gob_size_x, gob_size_z>();
@@ -171,14 +177,16 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) {
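+ // block_height and block_depth are given in log2 form; convert them to actual dimensions.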
+ const u32 block_height_size{1U << block_height};
+ const u32 block_depth_size{1U << block_depth};
if (bytes_per_pixel % 3 != 0 && (width * bytes_per_pixel) % fast_swizzle_align == 0) {
SwizzledData<true>(swizzled_data, unswizzled_data, unswizzle, width, height, depth,
- bytes_per_pixel, out_bytes_per_pixel, block_height, block_depth,
- width_spacing);
+ bytes_per_pixel, out_bytes_per_pixel, block_height_size,
+ block_depth_size, width_spacing);
} else {
SwizzledData<false>(swizzled_data, unswizzled_data, unswizzle, width, height, depth,
- bytes_per_pixel, out_bytes_per_pixel, block_height, block_depth,
- width_spacing);
+ bytes_per_pixel, out_bytes_per_pixel, block_height_size,
+ block_depth_size, width_spacing);
}
}
@@ -248,7 +256,9 @@ std::vector<u8> UnswizzleTexture(u8* address, u32 tile_size_x, u32 tile_size_y,
}
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height) {
+ u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
+ u32 block_height_bit) {
+ const u32 block_height = 1U << block_height_bit;
const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + (gob_size_x - 1)) /
gob_size_x};
for (u32 line = 0; line < subrect_height; ++line) {
@@ -269,8 +279,9 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
}
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
- u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
- u32 offset_x, u32 offset_y) {
+ u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
+ u32 block_height_bit, u32 offset_x, u32 offset_y) {
+ const u32 block_height = 1U << block_height_bit;
for (u32 line = 0; line < subrect_height; ++line) {
const u32 y2 = line + offset_y;
const u32 gob_address_y = (y2 / (gob_size_y * block_height)) * gob_size * block_height +
@@ -289,8 +300,9 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32
}
void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
- const u32 block_height, const std::size_t copy_size, const u8* source_data,
+ const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
u8* swizzle_data) {
+ const u32 block_height = 1U << block_height_bit;
const u32 image_width_in_gobs{(width + gob_size_x - 1) / gob_size_x};
std::size_t count = 0;
for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
@@ -356,9 +368,9 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth) {
if (tiled) {
- const u32 aligned_width = Common::AlignUp(width * bytes_per_pixel, gob_size_x);
- const u32 aligned_height = Common::AlignUp(height, gob_size_y * block_height);
- const u32 aligned_depth = Common::AlignUp(depth, gob_size_z * block_depth);
+ const u32 aligned_width = Common::AlignBits(width * bytes_per_pixel, gob_size_x_shift);
+ const u32 aligned_height = Common::AlignBits(height, gob_size_y_shift + block_height);
+ const u32 aligned_depth = Common::AlignBits(depth, gob_size_z_shift + block_depth);
return aligned_width * aligned_height * aligned_depth;
} else {
return width * height * depth * bytes_per_pixel;
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index e072d8401..eaec9b5a5 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -12,8 +12,8 @@ namespace Tegra::Texture {
// GOBSize constant. Calculated by 64 bytes in x multiplied by 8 y coords, represents
// an small rect of (64/bytes_per_pixel)X8.
-inline std::size_t GetGOBSize() {
- return 512;
+inline std::size_t GetGOBSizeShift() {
+ return 9;
}
/// Unswizzles a swizzled texture without changing its format.
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index 219bfd559..e3be018b9 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -52,9 +52,9 @@ enum class TextureFormat : u32 {
DXT45 = 0x26,
DXN1 = 0x27,
DXN2 = 0x28,
- Z24S8 = 0x29,
+ S8Z24 = 0x29,
X8Z24 = 0x2a,
- S8Z24 = 0x2b,
+ Z24S8 = 0x2b,
X4V4Z24__COV4R4V = 0x2c,
X4V4Z24__COV8R8V = 0x2d,
V8Z24__COV4R12V = 0x2e,
@@ -172,12 +172,16 @@ struct TICEntry {
BitField<26, 1, u32> use_header_opt_control;
BitField<27, 1, u32> depth_texture;
BitField<28, 4, u32> max_mip_level;
+
+ BitField<0, 16, u32> buffer_high_width_minus_one;
};
union {
BitField<0, 16, u32> width_minus_1;
BitField<22, 1, u32> srgb_conversion;
BitField<23, 4, TextureType> texture_type;
BitField<29, 3, u32> border_size;
+
+ BitField<0, 16, u32> buffer_low_width_minus_one;
};
union {
BitField<0, 16, u32> height_minus_1;
@@ -206,7 +210,10 @@ struct TICEntry {
}
u32 Width() const {
- return width_minus_1 + 1;
+ if (header_version != TICHeaderVersion::OneDBuffer) {
+ return width_minus_1 + 1;
+ }
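+ // 1D buffer textures store the width split across two 16-bit fields.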
+ return (buffer_high_width_minus_one << 16) | buffer_low_width_minus_one;
}
u32 Height() const {
@@ -219,20 +226,17 @@ struct TICEntry {
u32 BlockWidth() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_width;
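+ // The block width is kept in log2 format; callers are now expected to shift it themselves.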
+ return block_width;
}
u32 BlockHeight() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_height;
+ return block_height;
}
u32 BlockDepth() const {
ASSERT(IsTiled());
- // The block height is stored in log2 format.
- return 1 << block_depth;
+ return block_depth;
}
bool IsTiled() const {
@@ -240,6 +244,15 @@ struct TICEntry {
header_version == TICHeaderVersion::BlockLinearColorKey;
}
+ bool IsLineal() const {
+ return header_version == TICHeaderVersion::Pitch ||
+ header_version == TICHeaderVersion::PitchColorKey;
+ }
+
+ bool IsBuffer() const {
+ return header_version == TICHeaderVersion::OneDBuffer;
+ }
+
bool IsSrgbConversionEnabled() const {
return srgb_conversion != 0;
}
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 5a456e603..73978ff5b 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -436,7 +436,8 @@ void Config::ReadControlValues() {
void Config::ReadCoreValues() {
qt_config->beginGroup(QStringLiteral("Core"));
- Settings::values.use_cpu_jit = ReadSetting(QStringLiteral("use_cpu_jit"), true).toBool();
+ Settings::values.cpu_jit_enabled =
+ ReadSetting(QStringLiteral("cpu_jit_enabled"), true).toBool();
Settings::values.use_multi_core = ReadSetting(QStringLiteral("use_multi_core"), false).toBool();
qt_config->endGroup();
@@ -475,6 +476,7 @@ void Config::ReadDebuggingValues() {
Settings::values.dump_nso = ReadSetting(QStringLiteral("dump_nso"), false).toBool();
Settings::values.reporting_services =
ReadSetting(QStringLiteral("reporting_services"), false).toBool();
+ Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
qt_config->endGroup();
}
@@ -829,7 +831,7 @@ void Config::SaveControlValues() {
void Config::SaveCoreValues() {
qt_config->beginGroup(QStringLiteral("Core"));
- WriteSetting(QStringLiteral("use_cpu_jit"), Settings::values.use_cpu_jit, true);
+ WriteSetting(QStringLiteral("cpu_jit_enabled"), Settings::values.cpu_jit_enabled, true);
WriteSetting(QStringLiteral("use_multi_core"), Settings::values.use_multi_core, false);
qt_config->endGroup();
@@ -858,6 +860,7 @@ void Config::SaveDebuggingValues() {
QString::fromStdString(Settings::values.program_args), QStringLiteral(""));
WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false);
WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
+ WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
qt_config->endGroup();
}
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 63426fe4f..9a13bb797 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -37,6 +37,7 @@ void ConfigureDebug::SetConfiguration() {
ui->dump_exefs->setChecked(Settings::values.dump_exefs);
ui->dump_decompressed_nso->setChecked(Settings::values.dump_nso);
ui->reporting_services->setChecked(Settings::values.reporting_services);
+ ui->quest_flag->setChecked(Settings::values.quest_flag);
}
void ConfigureDebug::ApplyConfiguration() {
@@ -48,6 +49,7 @@ void ConfigureDebug::ApplyConfiguration() {
Settings::values.dump_exefs = ui->dump_exefs->isChecked();
Settings::values.dump_nso = ui->dump_decompressed_nso->isChecked();
Settings::values.reporting_services = ui->reporting_services->isChecked();
+ Settings::values.quest_flag = ui->quest_flag->isChecked();
Debugger::ToggleConsole();
Log::Filter filter;
filter.ParseFilterString(Settings::values.log_filter);
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index 4a7e3dc3d..7e109cef0 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -7,7 +7,7 @@
<x>0</x>
<y>0</y>
<width>400</width>
- <height>357</height>
+ <height>474</height>
</rect>
</property>
<property name="windowTitle">
@@ -181,6 +181,22 @@
</widget>
</item>
<item>
+ <widget class="QGroupBox" name="groupBox_5">
+ <property name="title">
+ <string>Advanced</string>
+ </property>
+ <layout class="QVBoxLayout" name="verticalLayout">
+ <item>
+ <widget class="QCheckBox" name="quest_flag">
+ <property name="text">
+ <string>Kiosk (Quest) Mode</string>
+ </property>
+ </widget>
+ </item>
+ </layout>
+ </widget>
+ </item>
+ <item>
<spacer name="verticalSpacer">
<property name="orientation">
<enum>Qt::Vertical</enum>
diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp
index 06d368dfc..7a6e921cd 100644
--- a/src/yuzu/configuration/configure_general.cpp
+++ b/src/yuzu/configuration/configure_general.cpp
@@ -22,8 +22,6 @@ ConfigureGeneral::ConfigureGeneral(QWidget* parent)
connect(ui->toggle_deepscan, &QCheckBox::stateChanged, this,
[] { UISettings::values.is_game_list_reload_pending.exchange(true); });
-
- ui->use_cpu_jit->setEnabled(!Core::System::GetInstance().IsPoweredOn());
}
ConfigureGeneral::~ConfigureGeneral() = default;
@@ -33,7 +31,6 @@ void ConfigureGeneral::SetConfiguration() {
ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing);
ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot);
ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme));
- ui->use_cpu_jit->setChecked(Settings::values.use_cpu_jit);
}
void ConfigureGeneral::ApplyConfiguration() {
@@ -42,8 +39,6 @@ void ConfigureGeneral::ApplyConfiguration() {
UISettings::values.select_user_on_boot = ui->toggle_user_on_boot->isChecked();
UISettings::values.theme =
ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString();
-
- Settings::values.use_cpu_jit = ui->use_cpu_jit->isChecked();
}
void ConfigureGeneral::changeEvent(QEvent* event) {
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index 1a5721fe7..184fdd329 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -51,26 +51,6 @@
</widget>
</item>
<item>
- <widget class="QGroupBox" name="PerformanceGroupBox">
- <property name="title">
- <string>Performance</string>
- </property>
- <layout class="QHBoxLayout" name="PerformanceHorizontalLayout">
- <item>
- <layout class="QVBoxLayout" name="PerformanceVerticalLayout">
- <item>
- <widget class="QCheckBox" name="use_cpu_jit">
- <property name="text">
- <string>Enable CPU JIT</string>
- </property>
- </widget>
- </item>
- </layout>
- </item>
- </layout>
- </widget>
- </item>
- <item>
<widget class="QGroupBox" name="theme_group_box">
<property name="title">
<string>Theme</string>
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 47e46f574..ae21f4753 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -750,6 +750,9 @@ void GMainWindow::OnDisplayTitleBars(bool show) {
QStringList GMainWindow::GetUnsupportedGLExtensions() {
QStringList unsupported_ext;
+ if (!GLAD_GL_ARB_buffer_storage) {
+ unsupported_ext.append(QStringLiteral("ARB_buffer_storage"));
+ }
if (!GLAD_GL_ARB_direct_state_access) {
unsupported_ext.append(QStringLiteral("ARB_direct_state_access"));
}
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 9ac92e937..30b22341b 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -340,7 +340,7 @@ void Config::ReadValues() {
}
// Core
- Settings::values.use_cpu_jit = sdl2_config->GetBoolean("Core", "use_cpu_jit", true);
+ Settings::values.cpu_jit_enabled = sdl2_config->GetBoolean("Core", "cpu_jit_enabled", true);
Settings::values.use_multi_core = sdl2_config->GetBoolean("Core", "use_multi_core", false);
// Renderer
@@ -383,6 +383,7 @@ void Config::ReadValues() {
Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false);
Settings::values.reporting_services =
sdl2_config->GetBoolean("Debugging", "reporting_services", false);
+ Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
std::stringstream ss(title_list);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 6538af098..4f1add434 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -78,7 +78,7 @@ touch_device=
[Core]
# Whether to use the Just-In-Time (JIT) compiler for CPU emulation
# 0: Interpreter (slow), 1 (default): JIT (fast)
-use_cpu_jit =
+cpu_jit_enabled =
# Whether to use multi-core for CPU emulation
# 0 (default): Disabled, 1: Enabled
@@ -224,6 +224,9 @@ gdbstub_port=24689
dump_exefs=false
# Determines whether or not yuzu will dump all NSOs it attempts to load while loading them
dump_nso=false
+# Determines whether or not yuzu will report to the game that the emulated console is in Kiosk Mode
+# false: Retail/Normal Mode (default), true: Kiosk Mode
+quest_flag =
[WebService]
# Whether or not to enable telemetry
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
index e2d3df180..f91b071bf 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
@@ -52,6 +52,10 @@ private:
bool EmuWindow_SDL2_GL::SupportsRequiredGLExtensions() {
std::vector<std::string> unsupported_ext;
+ if (!GLAD_GL_ARB_buffer_storage)
+ unsupported_ext.push_back("ARB_buffer_storage");
+ if (!GLAD_GL_ARB_direct_state_access)
+ unsupported_ext.push_back("ARB_direct_state_access");
if (!GLAD_GL_ARB_vertex_type_10f_11f_11f_rev)
unsupported_ext.push_back("ARB_vertex_type_10f_11f_11f_rev");
if (!GLAD_GL_ARB_texture_mirror_clamp_to_edge)
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index d7e0d408d..b96b7d279 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -114,7 +114,7 @@ void Config::ReadValues() {
}
// Core
- Settings::values.use_cpu_jit = sdl2_config->GetBoolean("Core", "use_cpu_jit", true);
+ Settings::values.cpu_jit_enabled = sdl2_config->GetBoolean("Core", "cpu_jit_enabled", true);
Settings::values.use_multi_core = sdl2_config->GetBoolean("Core", "use_multi_core", false);
// Renderer
diff --git a/src/yuzu_tester/default_ini.h b/src/yuzu_tester/default_ini.h
index 46a9960cd..0f880d8c7 100644
--- a/src/yuzu_tester/default_ini.h
+++ b/src/yuzu_tester/default_ini.h
@@ -10,7 +10,7 @@ const char* sdl2_config_file = R"(
[Core]
# Whether to use the Just-In-Time (JIT) compiler for CPU emulation
# 0: Interpreter (slow), 1 (default): JIT (fast)
-use_cpu_jit =
+cpu_jit_enabled =
# Whether to use multi-core for CPU emulation
# 0 (default): Disabled, 1: Enabled